config.name,config.backend.name,config.backend.version,config.backend._target_,config.backend.task,config.backend.library,config.backend.model,config.backend.processor,config.backend.device,config.backend.device_ids,config.backend.seed,config.backend.inter_op_num_threads,config.backend.intra_op_num_threads,config.backend.model_kwargs.trust_remote_code,config.backend.processor_kwargs.trust_remote_code,config.backend.hub_kwargs.trust_remote_code,config.backend.no_weights,config.backend.device_map,config.backend.torch_dtype,config.backend.eval_mode,config.backend.to_bettertransformer,config.backend.low_cpu_mem_usage,config.backend.attn_implementation,config.backend.cache_implementation,config.backend.autocast_enabled,config.backend.autocast_dtype,config.backend.torch_compile,config.backend.torch_compile_target,config.backend.quantization_scheme,config.backend.quantization_config.bits,config.backend.quantization_config.version,config.backend.deepspeed_inference,config.backend.peft_type,config.scenario.name,config.scenario._target_,config.scenario.iterations,config.scenario.duration,config.scenario.warmup_runs,config.scenario.input_shapes.batch_size,config.scenario.input_shapes.num_choices,config.scenario.input_shapes.sequence_length,config.scenario.new_tokens,config.scenario.latency,config.scenario.memory,config.scenario.energy,config.scenario.generate_kwargs.max_new_tokens,config.scenario.generate_kwargs.min_new_tokens,config.launcher.name,config.launcher._target_,config.launcher.device_isolation,config.launcher.device_isolation_action,config.launcher.numactl,config.launcher.start_method,config.environment.cpu,config.environment.cpu_count,config.environment.cpu_ram_mb,config.environment.system,config.environment.machine,config.environment.platform,config.environment.processor,config.environment.python_version,config.environment.gpu,config.environment.gpu_count,config.environment.gpu_vram_mb,config.environment.optimum_benchmark_version,config.environment.optimum_benchmark_commit,config.environment.transformers_version,config.environment.transformers_commit,config.environment.accelerate_version,config.environment.accelerate_commit,config.environment.diffusers_version,config.environment.diffusers_commit,config.environment.optimum_version,config.environment.optimum_commit,config.environment.timm_version,config.environment.timm_commit,config.environment.peft_version,config.environment.peft_commit,report.traceback,config.backend.hub_kwargs.revision,config.backend.hub_kwargs.force_download,config.backend.hub_kwargs.local_files_only,report.prefill.memory.unit,report.prefill.memory.max_ram,report.prefill.memory.max_global_vram,report.prefill.memory.max_process_vram,report.prefill.memory.max_reserved,report.prefill.memory.max_allocated,report.prefill.latency.unit,report.prefill.latency.count,report.prefill.latency.total,report.prefill.latency.mean,report.prefill.latency.stdev,report.prefill.latency.p50,report.prefill.latency.p90,report.prefill.latency.p95,report.prefill.latency.p99,report.prefill.latency.values,report.prefill.throughput.unit,report.prefill.throughput.value,report.prefill.energy.unit,report.prefill.energy.cpu,report.prefill.energy.ram,report.prefill.energy.gpu,report.prefill.energy.total,report.prefill.efficiency.unit,report.prefill.efficiency.value,report.decode.memory.unit,report.decode.memory.max_ram,report.decode.memory.max_global_vram,report.decode.memory.max_process_vram,report.decode.memory.max_reserved,report.decode.memory.max_allocated,report.decode.latency.unit,report.decode.latency.count,report.decode.latency.total,report.decode.latency.mean,report.decode.latency.stdev,report.decode.latency.p50,report.decode.latency.p90,report.decode.latency.p95,report.decode.latency.p99,report.decode.latency.values,report.decode.throughput.unit,report.decode.throughput.value,report.decode.energy.unit,report.decode.energy.cpu,report.decode.energy.ram,report.decode.energy.gpu,report.decode.energy.total,report.decode.efficiency.unit,report.decode.efficiency.value,report.per_token.memory,report.per_token.latency.unit,report.per_token.latency.count,report.per_token.latency.total,report.per_token.latency.mean,report.per_token.latency.stdev,report.per_token.latency.p50,report.per_token.latency.p90,report.per_token.latency.p95,report.per_token.latency.p99,report.per_token.latency.values,report.per_token.throughput.unit,report.per_token.throughput.value,report.per_token.energy,report.per_token.efficiency,config.backend.quantization_config.exllama_config.version,config.backend.quantization_config.exllama_config.max_input_len,config.backend.quantization_config.exllama_config.max_batch_size
4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,.,.,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 370, in cached_file raise EnvironmentError( OSError: . does not appear to have a file named config.json. Checkout 'https://huggingface.co/./tree/None' for available files. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-j-6b,EleutherAI/gpt-j-6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTJForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-7.5B,facebook/xglm-7.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: XGLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-7b,tiiuae/falcon-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: FalconForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/polyglot-ko-12.8b,EleutherAI/polyglot-ko-12.8b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciCoder-1b,Deci/DeciCoder-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) TypeError: DeciCoderAttention.forward() got an unexpected keyword argument 'cache_position' ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,m,m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/m/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2b8f-6dd19337464d9f7d653d4378;a56795cf-71df-4e6c-aae1-849c7b823365) Repository Not Found for url: https://huggingface.co/m/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: m is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-1.8B,Qwen/Qwen1.5-1.8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 644, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-MoE-A2.7B,Qwen/Qwen1.5-MoE-A2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3640, in from_pretrained hf_quantizer.preprocess_model( File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 182, in preprocess_model return self._process_model_before_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 85, in _process_model_before_weight_loading model, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( [Previous line repeated 1 more time] File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 165, in replace_with_awq_linear model._modules[name] = target_cls( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 46, in __init__ assert out_features % (32 // self.w_bit) == 0 AssertionError ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,v,v,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/v/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a33b1-1e06dc7465f12b634a93e159;e9651330-b69e-44e6-80aa-aabfe16750ff) Repository Not Found for url: https://huggingface.co/v/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: v is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-110B,Qwen/Qwen1.5-110B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-2-12b,stabilityai/stablelm-2-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 1158, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 1035, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 759, in forward self_attn_output, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 430, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.3b,EleutherAI/pythia-1.3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-7b-hf,meta-llama/Llama-2-7b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 615, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciLM-7B,Deci/DeciLM-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 559, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3704, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1490, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1656, in _check_and_enable_sdpa raise ValueError( ValueError: DeciLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-0.5B,Qwen/Qwen1.5-0.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 644, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,0,0,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/0/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in 
_hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3409-23a651e23c6c5237627202b4;8149ac2a-133c-4958-9196-bfb0e06aed96) Repository Not Found for url: https://huggingface.co/0/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 0 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
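Note: the error rows in this section record four recurring failure modes rather than measurements: missing AWQ kernels (AssertionError raised in awq/modules/linear/gemv.py), architectures without SDPA support (ValueError from _check_and_enable_sdpa, e.g. GPTNeoXForCausalLM, DeciLMForCausalLM, OPTForCausalLM), invalid model ids such as "0", "2", "l", "s", "B" (RepositoryNotFoundError), and CUDA OOM for 70B-class checkpoints on the single 24 GB A10G. The sketch below is a minimal pre-flight check a reader could run before re-benchmarking, not part of the benchmark itself; it assumes huggingface_hub exposes a repo_exists helper and that the AutoAWQ kernels are published on PyPI as autoawq-kernels (both are assumptions, not taken from this file):

    # pip install autoawq-kernels   # assumed package name; see https://github.com/casper-hansen/AutoAWQ_kernels
    import torch
    from huggingface_hub import repo_exists  # assumed helper in huggingface_hub
    from transformers import AutoModelForCausalLM

    model_id = "Qwen/Qwen1.5-0.5B"  # any repo id appearing in this CSV

    # Guard against the RepositoryNotFoundError rows (ids like "0", "2", "l", "s", "B").
    if not repo_exists(model_id):
        raise ValueError(f"{model_id} is not a valid Hugging Face Hub repo id")

    # Guard against the AWQ AssertionError rows: check the flag the traceback asserts on.
    from awq.modules.linear.gemv import AWQ_INSTALLED
    assert AWQ_INSTALLED, "AWQ kernels missing; install from https://github.com/casper-hansen/AutoAWQ_kernels"

    # Guard against the SDPA ValueError rows: fall back to eager attention,
    # as the traceback itself recommends for unsupported architectures.
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype=torch.float16,
        attn_implementation="eager",
    )

For the OOM rows, the only in-file signal is that meta-llama/Llama-2-70b-hf, meta-llama/Meta-Llama-3-70B and Qwen/Qwen2-beta-72B exceed the 24 GB A10G even with 4-bit AWQ weights; addressing that would require a larger or multi-GPU host rather than a configuration change.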
4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-70b-hf,meta-llama/Llama-2-70b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,2,2,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/2/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a32ae-5487bb9b03081f49335b9e0f;228d323b-a7dc-40c7-9d75-9e334207af0a) Repository Not Found for url: https://huggingface.co/2/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 2 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-2.7b,facebook/opt-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in 
run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,l,l,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/l/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a30fd-6fdbfe5a7daafefc45aecc6a;6e92737a-1d18-44ad-ade6-1d5e09e9f5db) Repository Not Found for url: https://huggingface.co/l/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: l is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Meta-Llama-3-70B,meta-llama/Meta-Llama-3-70B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights 
self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-32B,Qwen/Qwen1.5-32B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 644, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-72B,Qwen/Qwen2-beta-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,s,s,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/s/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a2c34-0c028f301eb5988e7d574806;ed01cfd0-5024-4d19-b550-0d5fd2d974d9) Repository Not Found for url: https://huggingface.co/s/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: s is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,B,B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 
1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/B/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3304-1c6eaa1e0392813376a221da;ec3e3966-0ee4-4f3a-a734-36c89c0a3695) Repository Not Found for url: https://huggingface.co/B/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: B is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-14B,Qwen/Qwen2-beta-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File 
""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 644, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,mistralai/Mistral-7B-v0.1,mistralai/Mistral-7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 1139, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 1024, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 738, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 639, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-16B-nl,Salesforce/codegen-16B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File 
""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: CodeGenForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-2.7b,EleutherAI/pythia-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. 
Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,a,a,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/a/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a30a5-5874597f31cf8473184d6bbd;6e5b330f-e17d-40f3-bdfe-661ff2a575eb) Repository Not Found for url: https://huggingface.co/a/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: a is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-2.7B,EleutherAI/gpt-neo-2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, 
worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-40b,tiiuae/falcon-40b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: FalconForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,1,1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/1/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e 
huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a34b6-6c842a3661b1853b4506606c;b5129e7e-c190-4435-85de-2dc97ea2d74a) Repository Not Found for url: https://huggingface.co/1/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 1 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-base-alpha-3b,stabilityai/stablelm-base-alpha-3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in 
target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,t,t,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/t/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file 
resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2fe2-1ecfafc627b03914785c7c1b;bfe9e82d-7fd0-451a-848a-88362a1867cb) Repository Not Found for url: https://huggingface.co/t/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: t is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neox-20b,EleutherAI/gpt-neox-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() 
File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,/,/,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in 
get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 370, in cached_file raise EnvironmentError( OSError: / does not appear to have a file named config.json. Checkout 'https://huggingface.co///tree/None' for available files. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-2b,google/recurrentgemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc3a6-40cb732c18a03bb327d98f0f;10d38fe2-6566-4d10-8c5d-a07f9ce77e69) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. 
Cannot access content at: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/recurrentgemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-72B,Qwen/Qwen1.5-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,-,-,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 106, in _inner_fn validate_repo_id(arg_value) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 160, in validate_repo_id raise HFValidationError( huggingface_hub.errors.HFValidationError: Repo id must use alphanumeric chars or '-', '_', '.', '--' and '..' are forbidden, '-' and '.' cannot start or end the name, max length is 96: '-'. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 463, in cached_file raise EnvironmentError( OSError: Incorrect path_or_model_id: '-'. Please provide either the path to a local folder or the repo_id of a model on the Hub. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-6B-nl,Salesforce/codegen-6B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: CodeGenForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-7b,google/recurrentgemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in 
hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667cc3fc-7b2843204b8bc92a1a2708df;98c25e10-d2f0-4a76-956f-54a4290a6f61) Repository Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: google/recurrentgemma-7b is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-13b,huggyllama/llama-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent 
call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 615, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-2b,google/gemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc2fe-23d8d11144cc9f077e6703d8;8a363630-8a7c-4ab9-bf90-f6c9991523f9) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/gemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-7B,Qwen/Qwen-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-13b-hf,meta-llama/Llama-2-13b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 615, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-4B,Qwen/Qwen1.5-4B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, 
in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 644, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-410m,EleutherAI/pythia-410m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-7b,huggyllama/llama-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 615, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-125m,EleutherAI/gpt-neo-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File 
""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,microsoft/phi-1_5,microsoft/phi-1_5,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/phi/modeling_phi.py"", line 1166, in forward outputs = self.model( File 
""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/phi/modeling_phi.py"", line 1045, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/phi/modeling_phi.py"", line 776, in forward attn_outputs, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/phi/modeling_phi.py"", line 658, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-14B,Qwen/Qwen-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-65b,huggyllama/llama-65b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 86.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-7b,google/gemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc351-3b67e9fb23f57b0e45a83705;5c8d6550-de3f-49a6-8597-02385a458b31) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/gemma-7b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-7b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,i,i,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/i/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a2f34-274013b966a51c813c78d501;c47d0a13-e4fd-42ba-b6ca-d0fb5231234f) Repository Not Found for url: https://huggingface.co/i/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: i is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-6.7b,facebook/opt-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in 
run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-34B,01-ai/Yi-34B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1914, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2651, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1174, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 978, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 718, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 614, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-160m,EleutherAI/pythia-160m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,M,M,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/M/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e 
huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2ede-75f862e422948dc75ae01b20;196eb5ba-12be-4010-96e7-379418ef828b) Repository Not Found for url: https://huggingface.co/M/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: M is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,openai-community/gpt2-large,openai-community/gpt2-large,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report 
= worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPT2LMHeadModel does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-70m,EleutherAI/pythia-70m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in 
load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm-20b,internlm/internlm-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3626, in from_pretrained model = cls(config, *model_args, **model_kwargs) File 
""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 906, in __init__ self.model = InternLMModel(config) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 729, in __init__ self.layers = nn.ModuleList([InternLMDecoderLayer(config) for _ in range(config.num_hidden_layers)]) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 729, in self.layers = nn.ModuleList([InternLMDecoderLayer(config) for _ in range(config.num_hidden_layers)]) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 545, in __init__ self.self_attn = INTERNLM_ATTENTION_CLASSES[config.attn_implementation](config=config) KeyError: 'sdpa' ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Meta-Llama-3-8B,meta-llama/Meta-Llama-3-8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in 
_call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 615, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-350m,facebook/opt-350m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-rw-1b,tiiuae/falcon-rw-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: FalconForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-125m,facebook/opt-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-4.5B,facebook/xglm-4.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: XGLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,x,x,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/x/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e 
huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3203-3c01912f62fe8c7d0169bc51;31697d2c-af91-4dd5-9c47-d9b99703641b) Repository Not Found for url: https://huggingface.co/x/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: x is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-3b-4e1t,stabilityai/stablelm-3b-4e1t,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target 
report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 1158, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 1035, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 759, in forward self_attn_output, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 430, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-2-1_6b,stabilityai/stablelm-2-1_6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 1158, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 1035, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 759, in forward self_attn_output, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 430, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-66b,facebook/opt-66b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File 
""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-180B,tiiuae/falcon-180B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/tiiuae/falcon-180B/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e 
huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-6677948f-7a70341d2dd093eb12d83c7d;a96454fe-1af9-4c52-9df2-6ac2e2dc5777) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/tiiuae/falcon-180B/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like tiiuae/falcon-180B is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-1.3B,EleutherAI/gpt-neo-1.3B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-6.7b,EleutherAI/pythia-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-7B,Qwen/Qwen1.5-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 644, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.4b,EleutherAI/pythia-1.4b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File 
""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-13b,facebook/opt-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. 
Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,8,8,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/8/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a31ac-08e33e65343ed6c47d63ab85;b7c2af8a-0309-4874-b4c4-20adc53d8e9f) Repository Not Found for url: https://huggingface.co/8/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 8 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-72B,Qwen/Qwen-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File 
""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,databricks/dbrx-base,databricks/dbrx-base,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", 
line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cbe63-54344de20c9808a15f5086f5;a2d2eef6-8964-4cf4-bc16-0317302b7701) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like databricks/dbrx-base is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,microsoft/rho-math-1b-v0.1,microsoft/rho-math-1b-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", 
line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 615, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,openai-community/gpt2,openai-community/gpt2,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in 
_autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPT2LMHeadModel does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,togethercomputer/RedPajama-INCITE-Base-7B-v0.1,togethercomputer/RedPajama-INCITE-Base-7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. 
If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-30b,huggyllama/llama-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return 
self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 615, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,togethercomputer/RedPajama-INCITE-Base-3B-v1,togethercomputer/RedPajama-INCITE-Base-3B-v1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in 
from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,mistralai/Mixtral-8x7B-v0.1,mistralai/Mixtral-8x7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,r,r,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/r/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a303f-080d309e1522e65f63e34b4c;cb4bf444-cc41-474c-9a41-8d61db82fe18) Repository Not Found for url: https://huggingface.co/r/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: r is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-12b,EleutherAI/pythia-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 
7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-30b,facebook/opt-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-564M,facebook/xglm-564M,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: XGLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,TencentARC/Mistral_Pro_8B_v0.1,TencentARC/Mistral_Pro_8B_v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 1139, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 1024, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return 
forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 738, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 639, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-base-alpha-7b,stabilityai/stablelm-base-alpha-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in 
from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-6B,01-ai/Yi-6B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1914, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2651, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1174, in 
forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 978, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 718, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 614, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-14B,Qwen/Qwen1.5-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 644, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm2-20b,internlm/internlm2-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/095ad5d22b0db76d20dbaefe7fa6ec7eb5da8b28/modeling_internlm2.py"", line 1204, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/095ad5d22b0db76d20dbaefe7fa6ec7eb5da8b28/modeling_internlm2.py"", line 1004, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/095ad5d22b0db76d20dbaefe7fa6ec7eb5da8b28/modeling_internlm2.py"", line 738, in forward hidden_states, self_attn_weights, present_key_value = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/095ad5d22b0db76d20dbaefe7fa6ec7eb5da8b28/modeling_internlm2.py"", line 625, in forward qkv_states = self.wqkv(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,.,.,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 370, in cached_file raise EnvironmentError( OSError: . does not appear to have a file named config.json. Checkout 'https://huggingface.co/./tree/None' for available files. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-j-6b,,cuda,0,42,,,,,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,main,False,False,MB,2082.578432,5566.365696,0.0,4919.918592,4635.53792,s,10,5.0974686889648435,0.5097468688964844,0.002523113409812242,0.5091204681396484,0.5118887878417968,0.5140431793212891,0.5157666925048828,"[0.5161975708007812, 0.508827880859375, 0.5094269104003907, 0.508350830078125, 0.5081865539550782, 0.5073575439453125, 0.5094130554199219, 0.5110750427246094, 0.5072232666015625, 0.5114100341796874]",tokens/s,502.2100489880333,kWh,5.994651640454928e-06,3.28480666631549e-06,2.7987258500894452e-05,3.726671680766487e-05,tokens/kWh,6869400.417569034,MB,2082.578432,5566.365696,0.0,4919.918592,4794.464768,s,10,295.3985,29.53985,0.004666201106872227,29.538681640625,29.5469361328125,29.54772978515625,29.548364707031247,"[29.534837890625, 29.546759765625, 29.542654296875, 29.540826171875, 29.5363359375, 29.535224609375, 29.54083984375, 29.5359609375, 29.5485234375, 29.536537109375]",tokens/s,2.132712251416307,kWh,0.0003485870901164082,0.0001910528620806053,0.0016025825737317144,0.002142222525928728,tokens/kWh,29408.709523623045,,s,629,299.5096441345215,0.47616795569876236,0.06052059742525611,0.46878207397460936,0.46948126220703124,0.4697759826660156,0.977483603515625,"[0.4688783264160156, 0.46847384643554685, 0.46878515625, 0.4694343566894531, 0.4695541687011719, 0.46879034423828125, 0.4688465270996094, 0.46848306274414064, 0.46828851318359377, 0.4686806945800781, 0.46836428833007815, 0.4685475769042969, 0.4686540832519531, 0.46948455810546874, 0.4689162292480469, 0.46845745849609377, 0.4684011535644531, 0.4683714599609375, 0.4684134521484375, 0.46870220947265623, 0.468790283203125, 0.4682076110839844, 0.4684062805175781, 0.46837042236328125, 0.469607421875, 0.46908721923828123, 0.4688885803222656, 0.4687656860351562, 0.4693012390136719, 0.4686929931640625, 0.46864382934570314, 0.468430908203125, 0.4682413330078125, 0.4685291442871094, 0.46904525756835935, 0.4706498413085938, 0.46837042236328125, 0.4685875244140625, 0.4687718505859375, 0.4685537414550781, 0.46882406616210937, 0.46856805419921876, 0.4686806945800781, 0.46866943359375, 0.4686581726074219, 0.4682403869628906, 0.4695777893066406, 0.4685884704589844, 0.4686663818359375, 0.4685537414550781, 0.46855987548828126, 0.4685066223144531, 0.4687831115722656, 0.46864794921875, 0.4684666748046875, 0.4691046447753906, 0.46919476318359377, 0.4684984436035156, 0.4691128234863281, 0.4691221008300781, 0.46927557373046874, 0.46959002685546875, 0.977623046875, 0.46814208984375, 0.4683407287597656, 0.46863873291015623, 0.46877490234375, 0.46880459594726565, 0.4687513732910156, 0.46857113647460935, 0.468421630859375, 0.46894284057617186, 0.4688281555175781, 0.46850765991210935, 0.4687431640625, 0.4691404724121094, 0.4699504699707031, 0.46978561401367186, 0.46937701416015626, 0.4690975341796875, 0.4691311950683594, 
0.47030169677734374, 0.4690933837890625, 0.4689039306640625, 0.4690298767089844, 0.46860595703125, 0.4687359924316406, 0.4692275085449219, 0.46936166381835936, 0.46901248168945314, 0.46843902587890623, 0.46878106689453125, 0.46938113403320314, 0.46973849487304686, 0.46935861206054685, 0.4687390441894531, 0.4685845031738281, 0.46890591430664064, 0.46899813842773436, 0.4688281555175781, 0.4687083435058594, 0.4687564697265625, 0.4687308654785156, 0.4687615966796875, 0.4689858703613281, 0.4688762817382813, 0.46893264770507814, 0.46929302978515625, 0.46915994262695315, 0.4696995849609375, 0.4688701477050781, 0.4687912902832031, 0.4691885986328125, 0.46969558715820314, 0.4696114196777344, 0.4692490234375, 0.46899301147460937, 0.46896435546875, 0.4694558715820312, 0.46925927734375, 0.4685393981933594, 0.4686530456542969, 0.46877902221679685, 0.4687155151367188, 0.46934527587890623, 0.9787330322265625, 0.468917236328125, 0.46886502075195313, 0.46889984130859375, 0.4691875915527344, 0.4684267578125, 0.46861004638671877, 0.46856500244140625, 0.46833560180664063, 0.4684933166503906, 0.4683735046386719, 0.4683263854980469, 0.4683735046386719, 0.468236328125, 0.46872265625, 0.4684236755371094, 0.46842572021484374, 0.46854452514648437, 0.46859982299804687, 0.4687155151367188, 0.46844732666015626, 0.4685188293457031, 0.4686991577148438, 0.46980194091796873, 0.46855987548828126, 0.4690032653808594, 0.46884454345703125, 0.4687575073242187, 0.4688609313964844, 0.4688558044433594, 0.4689459228515625, 0.46866329956054686, 0.4685557861328125, 0.46875238037109374, 0.4686827392578125, 0.46909747314453126, 0.46889984130859375, 0.4689756164550781, 0.46924798583984373, 0.46901144409179685, 0.4689469299316406, 0.46897048950195314, 0.46984909057617186, 0.4700190734863281, 0.4696781005859375, 0.469550048828125, 0.4697763977050781, 0.47159500122070314, 0.46917837524414063, 0.4684482421875, 0.4692193298339844, 0.46906060791015625, 0.4689141845703125, 0.46894284057617186, 0.468885498046875, 0.469064697265625, 0.46948043823242186, 0.4687861633300781, 0.4690513916015625, 0.46873907470703124, 0.46875341796875, 0.4690708618164062, 0.46917938232421874, 0.9775615844726563, 0.46874624633789064, 0.46858853149414065, 0.4686796875, 0.46987161254882814, 0.46889370727539065, 0.4687656860351562, 0.468790283203125, 0.4687278137207031, 0.46867864990234376, 0.4685322265625, 0.46837454223632813, 0.4686530456542969, 0.46875955200195313, 0.46893466186523436, 0.468706298828125, 0.4689858703613281, 0.46853018188476564, 0.46881585693359373, 0.46894796752929685, 0.4691353454589844, 0.46871038818359373, 0.46851071166992186, 0.468632568359375, 0.46843902587890623, 0.46874725341796875, 0.468890625, 0.4688670654296875, 0.46844723510742187, 0.468490234375, 0.4689090576171875, 0.46941900634765626, 0.46909030151367187, 0.4689100952148437, 0.4687175598144531, 0.46938113403320314, 0.4690616455078125, 0.4687718505859375, 0.46904730224609376, 0.4689336242675781, 0.4687933349609375, 0.46920501708984375, 0.46855471801757814, 0.4687718505859375, 0.47131646728515625, 0.4688424987792969, 0.46912005615234376, 0.4692090148925781, 0.4688189392089844, 0.4685926513671875, 0.4688619384765625, 0.46930743408203124, 0.4693247680664063, 0.4690636901855469, 0.46892135620117187, 0.4687145690917969, 0.4691373291015625, 0.4687145080566406, 0.4686673889160156, 0.46867352294921877, 0.46872677612304686, 0.4689254455566406, 0.46927871704101565, 0.9772830810546875, 0.46864077758789063, 0.4684922790527344, 0.46857421875, 0.46906878662109375, 0.4690033264160156, 
0.46841543579101563, 0.4685895690917969, 0.46849639892578127, 0.468279296875, 0.46869912719726564, 0.4685619201660156, 0.4687575073242187, 0.46905242919921875, 0.46849432373046873, 0.46846875, 0.46872576904296875, 0.46877490234375, 0.4686592102050781, 0.4684892272949219, 0.4687974548339844, 0.46863565063476564, 0.4685844421386719, 0.46846157836914065, 0.46825778198242185, 0.4684994506835938, 0.4686448669433594, 0.46830081176757815, 0.46856805419921876, 0.4685209655761719, 0.46874929809570315, 0.4689264526367187, 0.46845541381835937, 0.4686520385742188, 0.46867770385742186, 0.46937490844726565, 0.46933709716796873, 0.4687554626464844, 0.4690616455078125, 0.46915789794921875, 0.46891213989257813, 0.46888754272460936, 0.4687278137207031, 0.46915277099609376, 0.4688332824707031, 0.4693790588378906, 0.46926437377929686, 0.46878515625, 0.4690831298828125, 0.46895718383789065, 0.4689776611328125, 0.4690616455078125, 0.46907391357421874, 0.46851788330078126, 0.46869195556640625, 0.4686376953125, 0.46900634765625, 0.4722012023925781, 0.4687718505859375, 0.4688670654296875, 0.46880459594726565, 0.468642822265625, 0.4693155822753906, 0.9763594360351563, 0.46846771240234375, 0.46886502075195313, 0.4687083740234375, 0.4689837646484375, 0.4684646301269531, 0.4687247314453125, 0.46836224365234375, 0.4683929748535156, 0.4685619201660156, 0.46836737060546874, 0.4687503356933594, 0.46857830810546874, 0.46878411865234376, 0.4683345947265625, 0.468358154296875, 0.46845849609375, 0.4687032470703125, 0.4686520385742188, 0.4686458740234375, 0.46885784912109374, 0.46872677612304686, 0.4685823974609375, 0.46873095703125, 0.4685475158691406, 0.468790283203125, 0.46927053833007815, 0.46861822509765627, 0.46847589111328125, 0.46836224365234375, 0.470181884765625, 0.4690575256347656, 0.4688353271484375, 0.4692449340820313, 0.4685137939453125, 0.46857217407226565, 0.46875238037109374, 0.46873190307617185, 0.4689664001464844, 0.4691271667480469, 0.4686090087890625, 0.4692101135253906, 0.4687974548339844, 0.469317626953125, 0.4686315612792969, 0.46899917602539065, 0.4692490234375, 0.4690309143066406, 0.4687646789550781, 0.46850253295898436, 0.4687083435058594, 0.46883941650390626, 0.46927871704101565, 0.4686152038574219, 0.4694292297363281, 0.4691435546875, 0.46974566650390626, 0.4686940307617187, 0.468969482421875, 0.46890188598632815, 0.46895001220703125, 0.4687564697265625, 0.46904730224609376, 0.9786060791015625, 0.46841650390625, 0.4683786315917969, 0.46915994262695315, 0.46917938232421874, 0.4692777099609375, 0.46883636474609375, 0.468716552734375, 0.46857318115234375, 0.4691128234863281, 0.468600830078125, 0.4694783935546875, 0.46952960205078126, 0.4696739807128906, 0.4687196044921875, 0.46852505493164065, 0.46841854858398435, 0.4683591613769531, 0.4683100280761719, 0.4684431457519531, 0.4690411376953125, 0.4687923278808594, 0.4687216491699219, 0.46836224365234375, 0.4686090087890625, 0.4691363830566406, 0.46857113647460935, 0.46862335205078126, 0.46869094848632814, 0.4686581726074219, 0.4694435729980469, 0.4694640502929687, 0.46918450927734373, 0.46924288940429687, 0.4694343566894531, 0.4690462646484375, 0.469317626953125, 0.46890188598632815, 0.46938323974609375, 0.4693267822265625, 0.46866329956054686, 0.4690370483398438, 0.468864013671875, 0.4687575073242187, 0.4686438598632813, 0.46876776123046876, 0.46984698486328125, 0.4690380859375, 0.46935552978515627, 0.46853836059570314, 0.46839910888671876, 0.46874008178710935, 0.46875955200195313, 0.4687667236328125, 0.4688209838867187, 0.4691302490234375, 
0.46921624755859376, 0.4689674377441406, 0.46906777954101564, 0.46867864990234376, 0.46859161376953123, 0.46864794921875, 0.4688332824707031, 0.980326416015625, 0.468701171875, 0.46863360595703124, 0.468738037109375, 0.46885989379882814, 0.4684267578125, 0.468389892578125, 0.46853839111328127, 0.46828131103515624, 0.4683601989746094, 0.46820352172851565, 0.467962890625, 0.4681553955078125, 0.4686499938964844, 0.4686612548828125, 0.4687503356933594, 0.46850253295898436, 0.4680570983886719, 0.4683458557128906, 0.46834994506835936, 0.46889471435546876, 0.46862130737304686, 0.4687196044921875, 0.46831512451171875, 0.4685035400390625, 0.4694077453613281, 0.4687421569824219, 0.4688087158203125, 0.4701829528808594, 0.4684830322265625, 0.4688619384765625, 0.4692244567871094, 0.46869094848632814, 0.4689715270996094, 0.46905548095703126, 0.4686315612792969, 0.468864013671875, 0.4689029235839844, 0.46895001220703125, 0.46892340087890627, 0.46898687744140627, 0.469212158203125, 0.46885272216796875, 0.4688230285644531, 0.46921624755859376, 0.46866943359375, 0.46926849365234374, 0.46906777954101564, 0.469248046875, 0.4691763000488281, 0.46952960205078126, 0.4694057006835938, 0.46893157958984377, 0.4688035888671875, 0.4690667419433594, 0.4687615966796875, 0.4690288696289063, 0.46915890502929686, 0.46848818969726563, 0.4687585144042969, 0.4686315612792969, 0.4686049194335937, 0.46867352294921877, 0.9793843383789063, 0.4697733154296875, 0.46955825805664064, 0.46915890502929686, 0.46897869873046877, 0.4683345947265625, 0.46859982299804687, 0.468316162109375, 0.46864694213867186, 0.4686110534667969, 0.46845745849609377, 0.46816357421875, 0.4684892272949219, 0.46949169921875, 0.46866842651367185, 0.46861312866210936, 0.4686090087890625, 0.4685424499511719, 0.46886605834960937, 0.4688752746582031, 0.46871353149414063, 0.4691127624511719, 0.46852615356445315, 0.46842361450195313, 0.46897665405273437, 0.4689141845703125, 0.4685926513671875, 0.46885989379882814, 0.468421630859375, 0.4683888549804687, 0.46856600952148436, 0.4689182739257812, 0.46858547973632814, 0.46870220947265623, 0.4691896362304688, 0.46975283813476565, 0.46981631469726565, 0.4697753601074219, 0.470096923828125, 0.469834716796875, 0.46959820556640625, 0.4698542175292969, 0.46952243041992187, 0.4697272338867188, 0.4697907104492188, 0.4687656860351562, 0.4691937255859375, 0.46966680908203123, 0.46944461059570314, 0.46914764404296877, 0.469760009765625, 0.4693544921875, 0.46949993896484377, 0.4696063537597656, 0.46877694702148437, 0.4688281555175781, 0.4692244567871094, 0.468706298828125, 0.4684646301269531, 0.46868582153320315, 0.4686725158691406, 0.46858547973632814, 0.4686612548828125, 0.9803509521484375, 0.46829159545898436, 0.4681697387695313, 0.4690083923339844, 0.46880972290039064, 0.46866329956054686, 0.4685547485351563, 0.46837042236328125, 0.468105224609375, 0.4683243713378906, 0.46842364501953127, 0.4681779174804688, 0.4683816833496094, 0.4685599365234375, 0.4682454528808594, 0.46819635009765626, 0.46841036987304685, 0.46834994506835936, 0.468537353515625, 0.46845849609375, 0.46887115478515623, 0.46867352294921877, 0.46852197265625, 0.46868377685546875, 0.46900018310546876, 0.46878207397460936, 0.4686499938964844, 0.4685537414550781, 0.46879437255859374, 0.4684031982421875, 0.4687779846191406, 0.4690032653808594, 0.4684912719726563, 0.46856298828125, 0.46899917602539065, 0.4696033020019531, 0.4696708984375, 0.469855224609375, 0.4690411376953125, 0.468674560546875, 0.46875442504882814, 0.469359619140625, 0.4701552734375, 
0.469064697265625, 0.46861312866210936, 0.4686315612792969, 0.4697047119140625, 0.4690083923339844, 0.46888754272460936, 0.46868582153320315, 0.469073974609375, 0.4693411254882813, 0.4695132141113281, 0.46920089721679686, 0.46941696166992186, 0.46919271850585936, 0.46985626220703125, 0.46913946533203127, 0.46915789794921875, 0.4686253967285156, 0.46875955200195313, 0.468642822265625, 0.4691517333984375]",tokens/s,2.1000993200656053,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-7.5B,facebook/xglm-7.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2170.855424,7227.31008,0.0,6580.862976,6226.036224,s,10,5.757403198242186,0.5757403198242188,0.0009922788511109175,0.5756337280273438,0.5767303344726562,0.5773405700683594,0.5778287585449219,"[0.5763906860351562, 0.5754805908203126, 0.5742902221679688, 0.5758182373046875, 0.5751873168945313, 0.574752685546875, 0.575786865234375, 0.5751510620117187, 0.5779508056640625, 0.5765947265625]",tokens/s,444.6449053249566,kWh,6.784125003549789e-06,3.717420948032668e-06,3.101488592300367e-05,4.151643187458613e-05,tokens/kWh,6166233.1862557735,MB,2170.855424,7227.31008,0.0,6580.862976,6478.658048,s,10,336.90312500000005,33.6903125,0.0038578817961585758,33.690537109375,33.6951453125,33.6965375,33.69765125,"[33.6891484375, 33.6979296875, 33.68431640625, 33.69015234375, 33.69138671875, 33.68830859375, 33.690921875, 33.6910078125, 33.6948359375, 33.6851171875]",tokens/s,1.8699737498724598,kWh,0.00039786197528243067,0.0002180628312197768,0.001800652746076575,0.0024165775525787823,tokens/kWh,26069.926840448938,,s,629,341.54916522216826,0.5430034423245914,0.06841490320905963,0.5347440795898437,0.5353432861328125,0.5355950317382813,1.1101828955078124,"[0.5346764526367187, 0.534593505859375, 0.53607421875, 0.5346488037109375, 0.5349376220703125, 0.5348045043945312, 0.5348710327148437, 0.534361083984375, 0.5351505737304687, 0.5344501953125, 0.5342750854492188, 0.5343682861328125, 0.5348853759765625, 0.5342218017578125, 0.5351854248046874, 0.5346846923828125, 0.5351168212890625, 0.5340159912109375, 0.5345996704101562, 0.5340569458007812, 0.5348679809570313, 0.5342392578125, 0.5346948852539063, 0.5346427001953125, 0.5350113525390625, 0.5345361938476563, 0.534276123046875, 0.5349284057617187, 0.5341531982421875, 0.5347072143554688, 0.534413330078125, 0.5346447143554688, 0.5343323974609375, 0.534677490234375, 0.5346836547851562, 0.5348843383789063, 0.5341747436523437, 0.53469287109375, 0.5349488525390625, 0.5349376220703125, 0.5349048461914062, 0.5350922241210937, 0.5349293823242187, 0.5348905029296875, 0.5348187866210937, 0.5352499389648437, 0.5351372680664063, 0.5351874389648438, 0.5344020385742188, 0.5347307739257813, 0.534319091796875, 0.5348607788085937, 0.53452490234375, 0.5352724609375, 0.5355673828125, 0.53477685546875, 0.5353441162109375, 0.5347553100585938, 0.535225341796875, 0.5340364990234375, 0.5348362426757812, 0.53452392578125, 1.1103118896484374, 0.5353318481445313, 0.5341173706054687, 0.5345740966796875, 
0.5343201293945312, 0.5346703491210938, 0.5341255493164062, 0.5354373168945312, 0.534645751953125, 0.5345771484375, 0.5340641479492187, 0.535025634765625, 0.5342791748046875, 0.5352877807617188, 0.5341102294921874, 0.5349335327148438, 0.53408154296875, 0.5348935546875, 0.5341614379882812, 0.5348187866210937, 0.5348075561523438, 0.5348945922851562, 0.5341737060546875, 0.5358602294921875, 0.5343109130859375, 0.53522021484375, 0.5344286499023437, 0.5350133666992187, 0.5340877075195313, 0.5351638793945312, 0.5342811889648438, 0.535573486328125, 0.5346611328125, 0.5351854248046874, 0.5346078491210937, 0.53532568359375, 0.5343160400390625, 0.53486181640625, 0.5339535522460938, 0.5352847290039062, 0.5343467407226562, 0.5350491943359375, 0.5341419677734375, 0.5348751220703125, 0.534150146484375, 0.5347573852539063, 0.5344307250976562, 0.5354004516601563, 0.5344286499023437, 0.5352489013671875, 0.5350963134765625, 0.5354454956054687, 0.537660400390625, 0.5361489868164062, 0.53553662109375, 0.5356922607421875, 0.5343662109375, 0.5351168212890625, 0.535394287109375, 0.5363414916992187, 0.5349365844726562, 0.5353850708007812, 0.5346826171875, 1.110350830078125, 0.5346856689453126, 0.5343385620117187, 0.5345433349609375, 0.5344020385742188, 0.5345515747070313, 0.5340989379882812, 0.5348003540039062, 0.5341388549804688, 0.53479833984375, 0.5344071655273438, 0.5343672485351563, 0.5352960205078126, 0.5340405883789062, 0.5349048461914062, 0.5344491577148438, 0.535067626953125, 0.5343518676757812, 0.5349970092773437, 0.534128662109375, 0.5347153930664063, 0.53432421875, 0.5345413208007812, 0.5351311645507812, 0.5347737426757813, 0.534719482421875, 0.5343641357421876, 0.5350553588867187, 0.5344020385742188, 0.5347225341796875, 0.5345587158203124, 0.5351331787109375, 0.5342013549804687, 0.5347911376953125, 0.5345372314453125, 0.5347225341796875, 0.5342412719726563, 0.5348792114257812, 0.5342689208984375, 0.5345525512695313, 0.5345115966796875, 0.5345474853515625, 0.53495703125, 0.53488232421875, 0.535014404296875, 0.5344368896484375, 0.53495703125, 0.534287353515625, 0.5348126831054687, 0.5351044921875, 0.535436279296875, 0.53448193359375, 0.5350891723632812, 0.5339913940429688, 0.5349908447265626, 0.5347307739257813, 0.5351055297851562, 0.5346611328125, 0.5345126342773437, 0.5350236206054687, 0.5345863647460938, 0.5352509155273437, 0.5350174560546875, 1.1105382080078126, 0.5355950317382813, 0.5344696044921875, 0.5347164306640625, 0.5345341186523438, 0.5346948852539063, 0.5343866577148437, 0.535103515625, 0.5347359008789062, 0.5351536865234375, 0.5343518676757812, 0.5350891723632812, 0.5348259887695312, 0.5349007568359375, 0.5342996215820313, 0.5354127197265625, 0.5349365844726562, 0.5348239135742188, 0.5341798095703125, 0.534992919921875, 0.534329345703125, 0.5348515625, 0.5342095336914062, 0.53524072265625, 0.5343549194335937, 0.534803466796875, 0.534345703125, 0.534719482421875, 0.5346047973632813, 0.5349662475585938, 0.5341337890625, 0.5350051879882812, 0.5342566528320313, 0.5347942504882812, 0.5347379150390625, 0.5350942993164063, 0.5344153442382813, 0.5352182006835937, 0.534846435546875, 0.5348956298828125, 0.5342689208984375, 0.5350553588867187, 0.5349601440429688, 0.53513525390625, 0.534561767578125, 0.535309326171875, 0.5342945556640625, 0.5351065673828125, 0.5344860229492188, 0.53475634765625, 0.534240234375, 0.5348341674804687, 0.5342648315429688, 0.534993896484375, 0.5347952880859375, 0.5356380004882813, 0.534824951171875, 0.5348976440429688, 0.5344174194335938, 0.5349017333984375, 
0.5350840454101562, 0.5351629028320313, 0.5344470825195312, 1.1098358154296875, 0.5345259399414063, 0.5348362426757812, 0.534181884765625, 0.5347102661132812, 0.534091796875, 0.5347164306640625, 0.534054931640625, 0.5361285400390625, 0.5355950317382813, 0.5359862060546875, 0.5356984252929687, 0.5359247436523438, 0.5353123779296876, 0.5349468383789062, 0.53486181640625, 0.5354373168945312, 0.5347164306640625, 0.5351342163085937, 0.5345413208007812, 0.5349837036132813, 0.53418701171875, 0.5347440795898437, 0.5342218017578125, 0.5345996704101562, 0.5342843017578125, 0.5351854248046874, 0.5345218505859375, 0.5348915405273438, 0.5342116088867187, 0.5350205688476563, 0.534297607421875, 0.534667236328125, 0.5343784790039062, 0.5350491943359375, 0.5344635009765625, 0.535109619140625, 0.5345679321289063, 0.5353430786132812, 0.5343150024414063, 0.5344952392578125, 0.5345228881835937, 0.5347676391601562, 0.53477783203125, 0.534740966796875, 0.5348997192382813, 0.5346795654296875, 0.53429248046875, 0.534645751953125, 0.5347973022460938, 0.5346099243164063, 0.5341378784179688, 0.5352919311523437, 0.5342832641601563, 0.5348894653320313, 0.53421875, 0.5347788696289062, 0.534635498046875, 0.5353768920898437, 0.5347850341796875, 0.5351485595703125, 0.5345423583984374, 0.5350031127929687, 1.1105545654296876, 0.5355919189453126, 0.5343006591796875, 0.5348720703125, 0.53427197265625, 0.5345126342773437, 0.5340037231445313, 0.535520263671875, 0.534739990234375, 0.535130126953125, 0.5343836059570313, 0.5350440673828125, 0.5346948852539063, 0.5351239624023437, 0.5344174194335938, 0.5351260375976562, 0.5345014038085938, 0.5347993774414063, 0.534582275390625, 0.5357168579101562, 0.5343109130859375, 0.5353820190429688, 0.5346990356445313, 0.5351629028320313, 0.5347451171875, 0.5354332275390625, 0.5348679809570313, 0.5348095703125, 0.5343733520507813, 0.5351966552734375, 0.5344102172851563, 0.5353021240234375, 0.5340569458007812, 0.534813720703125, 0.5341829223632812, 0.5347389526367188, 0.5341173706054687, 0.5348935546875, 0.5345310668945312, 0.535056396484375, 0.5342576904296875, 0.5349898071289062, 0.5346007080078125, 0.5354352416992187, 0.53414501953125, 0.5347205200195313, 0.5340692749023438, 0.5347993774414063, 0.5341737060546875, 0.53488330078125, 0.5342361450195312, 0.5347962646484375, 0.53441845703125, 0.53486181640625, 0.5344409790039063, 0.5348505859375, 0.5344778442382813, 0.5346652221679687, 0.5343754272460938, 0.5350758666992188, 0.5344163818359375, 0.5350686645507813, 0.5343856811523438, 1.1107747802734376, 0.5348556518554688, 0.5345413208007812, 0.5346017456054688, 0.5344235229492188, 0.5347625122070313, 0.534202392578125, 0.5346734008789062, 0.534513671875, 0.5345167236328126, 0.5345884399414063, 0.5348331298828125, 0.534603759765625, 0.5347123413085938, 0.535151611328125, 0.5338849487304688, 0.5345567016601562, 0.53408154296875, 0.5351454467773438, 0.5341777954101562, 0.5348731079101563, 0.534076416015625, 0.5348925170898438, 0.5345361938476563, 0.534635498046875, 0.534403076171875, 0.5345423583984374, 0.5348433837890625, 0.5344429931640625, 0.5346826171875, 0.5351076049804687, 0.5349652709960937, 0.5341634521484375, 0.5351157836914062, 0.534329345703125, 0.5346119384765625, 0.5346826171875, 0.5350000610351563, 0.5348444213867187, 0.5346211547851563, 0.5343887329101562, 0.5350768432617188, 0.53532568359375, 0.5349631958007812, 0.5353236694335938, 0.534540283203125, 0.5354454956054687, 0.5352796020507813, 0.5357506713867187, 0.5357987670898438, 0.5363568725585938, 0.5354977416992187, 
0.5355079956054688, 0.5346631469726563, 0.5348648681640625, 0.53481982421875, 0.5350850830078125, 0.5346867065429688, 0.534488037109375, 0.5345198364257813, 0.5341511840820312, 0.5348761596679688, 0.5343672485351563, 1.1098511962890625, 0.5344020385742188, 0.5343488159179688, 0.5351004028320312, 0.5345003662109375, 0.5340426025390625, 0.5349335327148438, 0.534129638671875, 0.5345115966796875, 0.534624267578125, 0.5344778442382813, 0.5346898193359375, 0.5347532958984375, 0.5341439819335938, 0.5348208618164062, 0.5342740478515625, 0.534593505859375, 0.5343160400390625, 0.5345218505859375, 0.5341788330078125, 0.5349027709960937, 0.5340282592773438, 0.5352099609375, 0.5348515625, 0.5344050903320312, 0.534667236328125, 0.534846435546875, 0.5345792236328125, 0.5350031127929687, 0.5352366333007812, 0.5349683227539063, 0.5348577270507813, 0.5350656127929687, 0.5350123291015625, 0.5349437255859375, 0.5353164672851562, 0.535109619140625, 0.5345955810546875, 0.5346232299804687, 0.5350543212890625, 0.5351209106445313, 0.5352017822265625, 0.5351526489257813, 0.534813720703125, 0.5347174682617187, 0.53507275390625, 0.5347962646484375, 0.535278564453125, 0.5350348510742188, 0.5344235229492188, 0.5348864135742187, 0.5350287475585938, 0.5347123413085938, 0.5351557006835937, 0.5351854248046874, 0.534824951171875, 0.5352898559570313, 0.5345700073242188, 0.5350369262695313, 0.5348126831054687, 0.5350420532226563, 0.5343068237304688, 0.5352109985351563, 1.1140484619140625, 0.5357752075195312, 0.5347020874023437, 0.5347215576171875, 0.5340579833984375, 0.5347225341796875, 0.5339064331054687, 0.5348648681640625, 0.5343109130859375, 0.5346764526367187, 0.5340579833984375, 0.534751220703125, 0.5342074584960937, 0.53477685546875, 0.5345331420898437, 0.5362677612304687, 0.5344603881835938, 0.535562255859375, 0.534560791015625, 0.5345679321289063, 0.53437646484375, 0.5348444213867187, 0.5341480712890625, 0.5348433837890625, 0.5341439819335938, 0.5348792114257812, 0.5344849853515625, 0.5353707275390625, 0.534392822265625, 0.5349959716796875, 0.534276123046875, 0.5353820190429688, 0.5345740966796875, 0.5356973876953125, 0.534592529296875, 0.5353021240234375, 0.5345812377929687, 0.5350471801757812, 0.5342730102539063, 0.5356431274414063, 0.5344461059570312, 0.5352222900390625, 0.534319091796875, 0.5352243041992187, 0.5342218017578125, 0.5354874877929687, 0.5344635009765625, 0.5353492431640625, 0.5346590576171875, 0.5352969970703125, 0.5345413208007812, 0.5364439086914062, 0.5349744873046876, 0.5354383544921875, 0.5349171142578125, 0.5353850708007812, 0.5352386474609375, 0.5347727661132813, 0.5347440795898437, 0.5347891235351563, 0.53429248046875, 0.5350348510742188, 0.5342208251953126, 1.1111966552734376, 0.5346058349609375, 0.5340743408203125, 0.5344491577148438, 0.5340753784179687, 0.5344573364257813, 0.5351997680664062, 0.5346652221679687, 0.5347676391601562, 0.5348843383789063, 0.5340805053710938, 0.5348229370117188, 0.5342904052734375, 0.5346806030273438, 0.5341122436523438, 0.5346744384765625, 0.5341951904296875, 0.53463037109375, 0.5346017456054688, 0.5344931640625, 0.5348495483398438, 0.5341675415039062, 0.534192138671875, 0.5346129760742188, 0.5353421020507813, 0.5345730590820312, 0.5347010498046875, 0.5343364868164062, 0.53553564453125, 0.5343969116210937, 0.534887451171875, 0.5349539794921875, 0.5349130249023437, 0.5349970092773437, 0.5343682861328125, 0.534813720703125, 0.5341798095703125, 0.5349908447265626, 0.5340886840820313, 0.5348864135742187, 0.53427197265625, 0.5349468383789062, 0.5341091918945312, 
0.5351629028320313, 0.5349376220703125, 0.5348556518554688, 0.53496728515625, 0.5342669067382813, 0.5347440795898437, 0.53432421875, 0.53492529296875, 0.5341470947265625, 0.5350174560546875, 0.5343016967773437, 0.5352642822265625, 0.5343908081054688, 0.535689208984375, 0.535041015625, 0.5345894165039062, 0.5347891235351563, 0.5347962646484375, 0.5358428344726562, 0.5355755615234375]",tokens/s,1.8416089513521527,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-7b,tiiuae/falcon-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3640, in from_pretrained hf_quantizer.preprocess_model( File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 182, in preprocess_model return self._process_model_before_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 85, in _process_model_before_weight_loading model, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( [Previous line repeated 1 more time] File 
""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 165, in replace_with_awq_linear model._modules[name] = target_cls( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemm.py"", line 101, in __init__ assert self.in_features % self.group_size == 0 AssertionError ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/polyglot-ko-12.8b,EleutherAI/polyglot-ko-12.8b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.0,,,,,,MB,1740.263424,9941.024768,0.0,9294.577664,8910.102528,s,10,10.642888305664064,1.0642888305664062,0.0008435144191809837,1.06450927734375,1.0651936889648437,1.065327557373047,1.0654346520996094,"[1.065136474609375, 1.06546142578125, 1.063631591796875, 1.063218505859375, 1.06368701171875, 1.0628612060546876, 1.0644658203125, 1.064552734375, 1.0647095947265626, 1.0651639404296875]",tokens/s,240.5362084498799,kWh,1.2557922783825134e-05,6.880571250694628e-06,6.097363211219897e-05,8.041212614671872e-05,tokens/kWh,3183599.4428614704,MB,1740.86144,9941.024768,0.0,9294.577664,9220.867072,s,10,630.7354296875,63.07354296875,0.00490218127220892,63.07426171875,63.081011718750005,63.081064453125,63.081106640625,"[63.0750703125, 63.07307421875, 63.081, 63.075046875, 63.07416015625, 63.0811171875, 63.07436328125, 63.0665078125, 63.06765234375, 63.0674375]",tokens/s,0.9988340124037992,kWh,0.0007445980064074199,0.0004081038876869388,0.003603297882636,0.004755999776730358,tokens/kWh,13246.426189555263,,s,629,639.312124511719,1.016394474581429,0.12643667238363906,1.0011002807617186,1.0017415283203124,1.002017578125,2.065029130859375,"[1.0008678588867188, 1.0004398193359374, 1.001291748046875, 1.0011023559570313, 1.0010675048828126, 1.0010203857421875, 1.0008780517578124, 1.000616943359375, 1.0006917114257812, 1.0008555297851562, 1.0007982177734376, 1.000685546875, 1.0008023071289063, 1.000985595703125, 1.0008780517578124, 1.0010081787109375, 1.0006251220703124, 1.0007255249023437, 1.000806396484375, 1.0009149169921876, 1.0006405029296874, 1.0007285766601564, 1.0012835693359374, 1.0011812133789062, 1.0010265502929687, 1.0012303466796875, 1.0011586303710938, 1.0011637573242187, 1.0010664672851564, 1.0014730224609374, 1.0011238403320313, 1.0014033813476562, 1.0011033325195313, 1.0013900756835938, 1.0010501098632814, 1.0017617797851563, 1.0014638061523438, 1.0016256103515624, 1.0009415893554687, 1.0013726806640626, 1.000985595703125, 1.0008145751953126, 1.001359375, 1.0016143188476563, 1.0011217651367188, 1.0013255615234375, 1.0009302978515624, 1.001133056640625, 1.0009508056640626, 1.001533447265625, 1.0019143676757813, 1.0015897827148437, 1.0020361938476563, 1.0020464477539062, 1.00206591796875, 1.0012640991210937, 1.0014044189453124, 1.001734130859375, 1.0015846557617187, 1.00234033203125, 1.0014207763671874, 1.0013255615234375, 2.067092529296875, 1.0008361206054688, 1.0011576538085938, 1.0006384887695312, 1.001079833984375, 1.0007879638671875, 1.0012293090820312, 
1.0010501098632814, 1.001322509765625, 1.0013265991210938, 1.0013900756835938, 1.000838134765625, 1.001486328125, 1.0010675048828126, 1.0011484375, 1.0016091918945313, 1.001449462890625, 1.0011023559570313, 1.0005330200195313, 1.0008074340820312, 1.0010623779296874, 1.0006220703125, 1.0009927978515625, 1.0007716064453125, 1.001026611328125, 1.0008770141601562, 1.0014136352539063, 1.0009508056640626, 1.0009508056640626, 1.00097021484375, 1.001290771484375, 1.00075830078125, 1.0016583862304687, 1.0007131958007813, 1.0006619873046876, 1.0005339965820312, 1.0010706176757813, 1.0007859497070313, 1.0006456298828126, 1.0009036865234375, 1.00139111328125, 1.0010776977539062, 1.0015672607421875, 1.0008135375976563, 1.0011463623046875, 1.000784912109375, 1.0035189819335937, 1.00113818359375, 1.0015252685546876, 1.0012507934570312, 1.0016573486328124, 1.0014893798828124, 1.001533447265625, 1.0010839233398436, 1.0011525268554688, 1.0011566162109375, 1.0015396118164062, 1.0014955444335938, 1.0011074829101563, 1.0009037475585938, 1.0016132202148438, 1.0009343872070313, 1.002029052734375, 2.06504150390625, 1.0005913696289062, 1.0010398559570313, 1.00097021484375, 1.0005196533203125, 1.0015057983398437, 1.0011084594726563, 1.0007337036132813, 1.0012190551757814, 1.0008473510742188, 1.0012252197265625, 1.0008402099609375, 1.0011617431640625, 1.0010194091796876, 1.0008719482421875, 1.0009927978515625, 1.0008237915039062, 1.00075927734375, 1.0011688842773439, 1.000806396484375, 1.0008606567382812, 1.0010132446289062, 1.0011740112304688, 1.00221337890625, 1.0005995483398438, 1.000721435546875, 1.00090673828125, 1.0007039794921875, 1.001343994140625, 1.001470947265625, 1.0014852905273437, 1.00143408203125, 1.0017413330078124, 1.002018798828125, 1.0018693237304688, 1.0017587280273437, 1.0011954956054687, 1.0007203979492187, 1.0011064453125, 1.00105419921875, 1.0013839111328124, 1.0012108764648437, 1.001881591796875, 1.0012119140625, 1.0016245727539062, 1.0019154052734376, 1.0013501586914062, 1.0026547241210937, 1.0016737060546874, 1.0011678466796874, 1.00153857421875, 1.0014085083007813, 1.0018191528320313, 1.001275390625, 1.0014157104492187, 1.0018908081054687, 1.0011791381835937, 1.0009927978515625, 1.0019522705078125, 1.0013368530273437, 1.0016307373046875, 1.0016552734375, 1.001829345703125, 2.064997314453125, 1.001175048828125, 1.0011033935546876, 1.0011688232421876, 1.001164794921875, 1.0007736206054687, 1.001064453125, 1.0007973022460936, 1.00076025390625, 1.0006077270507812, 1.0007183227539063, 1.0011361083984376, 1.0011351318359376, 1.0006005859375, 1.0008688354492188, 1.000642578125, 1.00075830078125, 1.0010562744140625, 1.001421875, 1.0009476318359376, 1.000995849609375, 1.0008811645507814, 1.001438232421875, 1.000816650390625, 1.0011361083984376, 1.0006569213867187, 1.0008370971679688, 1.0008350830078125, 1.00099072265625, 1.0007787475585936, 1.0014464111328125, 1.0013634643554687, 1.0017720336914062, 1.001069580078125, 1.001523193359375, 1.0014351196289062, 1.002029052734375, 1.001865234375, 1.0020095825195312, 1.0007572631835937, 1.0010081176757812, 1.0010040283203125, 1.000943603515625, 1.001080810546875, 1.0031155395507811, 1.000857666015625, 1.00139208984375, 1.001006103515625, 1.0015139770507813, 1.0011719970703126, 1.0016010131835937, 1.0014443359375, 1.0011443481445312, 1.0013952026367188, 1.0011340942382811, 1.0010603637695312, 1.0010203857421875, 1.0011033935546876, 1.0015077514648438, 1.00125390625, 1.002071044921875, 1.0015109252929688, 1.0014412841796876, 2.065207275390625, 
1.0004561767578124, 1.0008197021484375, 1.0008104858398437, 1.0015027465820312, 1.0008872680664063, 1.0009497680664063, 1.0020311279296874, 1.0013726806640626, 1.0015405883789064, 1.00103271484375, 1.0013511962890624, 1.0011668701171874, 1.0011986083984374, 1.00210791015625, 1.0007920532226562, 1.0011351318359376, 1.0023803100585937, 1.0009579467773437, 1.0010460205078124, 1.0012252197265625, 1.0007500610351563, 1.001091064453125, 1.0007879638671875, 1.0017576904296874, 1.0010828857421874, 1.0014474487304688, 1.0011094970703125, 1.0007890014648437, 1.0009169921875, 1.0007787475585936, 1.0004838256835937, 1.0008545532226563, 1.0006609497070313, 1.0009682006835938, 1.0007777099609374, 1.0011300048828125, 1.00124365234375, 1.000796142578125, 1.0009343872070313, 1.0009251708984375, 1.000722412109375, 1.0007971801757813, 1.0006896362304687, 1.0014924926757813, 1.0007080688476562, 1.0007705688476562, 1.0013204345703124, 1.001218017578125, 1.0010511474609376, 1.0009517822265626, 1.0008607177734374, 1.0009199829101563, 1.00119140625, 1.0017146606445313, 1.0010194091796876, 1.0013358154296874, 1.003936767578125, 1.001802734375, 1.0014443359375, 1.0012477416992187, 1.0015396118164062, 1.0018938598632812, 2.064691162109375, 1.0003916625976563, 1.0009784545898437, 1.0010337524414064, 1.0008944702148437, 1.00107470703125, 1.0008616943359374, 1.001354248046875, 1.00143408203125, 1.0012088623046875, 1.0020802612304687, 1.0009651489257811, 1.001312255859375, 1.0014505004882812, 1.0007481079101563, 1.0010009155273438, 1.0012723388671876, 1.0009712524414063, 1.0011033325195313, 1.0011371459960938, 1.0015723266601562, 1.0016163940429688, 1.0008381958007813, 1.0017801513671876, 1.0015897827148437, 1.0014259033203126, 1.0019799194335937, 1.0012518310546874, 1.0014515380859375, 1.0014484252929687, 1.001759765625, 1.00107568359375, 1.0014505004882812, 1.0014423217773438, 1.001112548828125, 1.0011678466796874, 1.0010286254882812, 1.0009210815429688, 1.0033643798828125, 1.0010767211914062, 1.001469970703125, 1.0009139404296874, 1.001143310546875, 1.00132763671875, 1.0011791381835937, 1.001169921875, 1.0012620849609375, 1.0009456787109374, 1.0010347290039063, 1.00092724609375, 1.0015047607421874, 1.00096923828125, 1.0009251708984375, 1.0013870239257812, 1.0013132934570312, 1.0014105834960938, 1.0011658325195312, 1.0012948608398438, 1.0014935302734376, 1.0009978637695311, 1.0015344848632812, 1.001290771484375, 1.0020157470703126, 2.0664228515625, 1.001302001953125, 1.000827880859375, 1.0009938354492187, 1.0006937255859376, 1.0006784057617188, 1.000911865234375, 1.0006487426757813, 1.0011852416992189, 1.0008043823242188, 1.0012477416992187, 1.0013726806640626, 1.0013101806640625, 1.0008023071289063, 1.0008514404296875, 1.0008248291015625, 1.0011658325195312, 1.001006103515625, 1.0012498168945312, 1.0011207885742188, 1.0012610473632813, 1.0012415771484375, 1.0010951538085937, 1.0023833618164062, 1.0010685424804688, 1.0005288696289063, 1.000853515625, 1.000543212890625, 1.0008811645507814, 1.0009784545898437, 1.0013153076171875, 1.0010859375, 1.0009425659179687, 1.0009343872070313, 1.000748046875, 1.000453125, 1.0013388671875, 1.00075927734375, 1.000826904296875, 1.00143310546875, 1.0011207885742188, 1.0015027465820312, 1.001175048828125, 1.0009395141601563, 1.0011279296875, 1.0010224609375, 1.0013798217773437, 1.0013460693359375, 1.00193896484375, 1.00166552734375, 1.0014197998046874, 1.0018252563476562, 1.0011893920898438, 1.0011268920898437, 1.0015764770507813, 1.0016574096679687, 1.0018969116210938, 
1.0011760864257813, 1.0016829223632813, 1.0013952026367188, 1.0015078125, 1.0017791748046876, 1.001365478515625, 2.065933349609375, 1.00049609375, 1.0008944702148437, 1.00080126953125, 1.0008135375976563, 1.0008340454101563, 1.0008822021484376, 1.001302001953125, 1.0008237915039062, 1.0006292724609376, 1.0011105346679687, 1.0010654907226562, 1.0013255615234375, 1.0006692504882813, 1.0007797241210938, 1.0010091552734375, 1.0015211791992187, 1.0007367553710937, 1.0007234497070312, 1.0005473022460938, 1.0006712036132812, 1.00077978515625, 1.0011351318359376, 1.001279541015625, 1.0010480346679687, 1.0010921020507813, 1.000892333984375, 1.001080810546875, 1.00067431640625, 1.0010296020507812, 1.0010767822265625, 1.0005595703125, 1.0013173828125, 1.0010501098632814, 1.0013409423828126, 1.0012006225585937, 1.0011443481445312, 1.0012139282226562, 1.0008053588867187, 1.0007408447265624, 1.0009384765625, 1.0005883178710937, 1.0010715942382813, 1.0009569091796875, 1.0010900268554688, 1.0009027099609376, 1.000849365234375, 1.0009651489257811, 1.0008524780273438, 1.001027587890625, 1.0014033813476562, 1.0012498168945312, 1.00135009765625, 1.001059326171875, 1.0014095458984376, 1.0020269775390624, 1.0020638427734374, 1.0020095825195312, 1.0013235473632813, 1.0012037353515626, 1.0014484252929687, 1.0009476928710936, 1.0012733154296876, 2.066282470703125, 1.0010859375, 1.0004767456054688, 1.0010264892578125, 1.0008135375976563, 1.0007572631835937, 1.0011146240234374, 1.0009620361328124, 1.0013931274414063, 1.0009630737304687, 1.0009375, 1.0007900390625, 1.0008340454101563, 1.0009886474609375, 1.0007900390625, 1.0007756958007812, 1.0007982177734376, 1.0019952392578124, 1.0009794311523437, 1.0006138916015626, 1.0008309936523438, 1.0008893432617187, 1.0010368041992188, 1.0011248779296875, 1.0005238037109374, 1.0006343383789063, 1.0008145751953126, 1.0007039794921875, 1.0007521362304688, 1.0009896850585938, 1.0010880126953126, 1.0012303466796875, 1.00091796875, 1.0013450317382813, 1.000784912109375, 1.0007367553710937, 1.0009825439453126, 1.0009108276367187, 1.001238525390625, 1.0008944702148437, 1.0009989013671876, 1.0009886474609375, 1.0009651489257811, 1.0011422729492188, 1.0012374877929688, 1.0012569580078126, 1.00099072265625, 1.0008780517578124, 1.0009682006835938, 1.0007080688476562, 1.001248779296875, 1.0015518798828125, 1.0010582885742187, 1.0014033813476562, 1.001016357421875, 1.0009476318359376, 1.0012354736328124, 1.0020833129882813, 1.0019143676757813, 1.0018539428710938, 1.0019850463867188, 1.0015999755859375, 1.0015641479492188, 2.0665712890625, 1.0007060546875, 1.0006599731445311, 1.00056884765625, 1.000685546875, 1.0005678100585937, 1.0009722900390625, 1.000806396484375, 1.0007203979492187, 1.0008207397460938, 1.0005872802734375, 1.0005545043945312, 1.00069384765625, 1.0008534545898438, 1.0013716430664064, 1.0008811645507814, 1.0013511962890624, 1.0014893798828124, 1.0010726318359375, 1.0017157592773438, 1.0011299438476562, 1.00086376953125, 1.00071728515625, 1.0005729370117187, 1.0008995971679688, 1.0005995483398438, 1.0024898681640626, 1.0008678588867188, 1.000722412109375, 1.0012149658203124, 1.0008237915039062, 1.00071728515625, 1.00101220703125, 1.0010562744140625, 1.0010572509765625, 1.00170751953125, 1.00140234375, 1.000974365234375, 1.0007828369140626, 1.0009139404296874, 1.0011300048828125, 1.0010675048828126, 1.0013562622070313, 1.000953857421875, 1.0009722900390625, 1.0011146240234374, 1.0015631103515625, 1.000975341796875, 1.0017495727539063, 1.0013224487304688, 
1.0012518310546874, 1.001439208984375, 1.0012415771484375, 1.0010685424804688, 1.0010286254882812, 1.00105419921875, 1.0016696166992187, 1.0011002807617186, 1.0011443481445312, 1.0017423095703124, 1.0011033325195313, 1.0011924438476563, 1.0012406005859376]",tokens/s,0.9838699688049951,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciCoder-1b,Deci/DeciCoder-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1914, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2651, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1174, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 978, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 718, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) TypeError: DeciCoderAttention.forward() got an unexpected keyword argument 'cache_position' ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,m,m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/m/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, 
**kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2b6d-2d3f547366b764a42fe11f4a;f118b668-3cf6-4e2f-84ac-1d5624c462cc) Repository Not Found for url: https://huggingface.co/m/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: m is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-1.8B,Qwen/Qwen1.5-1.8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA 
A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1868.201984,3349.676032,0.0,2703.228928,2578.238464,s,10,1.410495346069336,0.1410495346069336,0.0017689689177857657,0.14056432342529296,0.14309543151855467,0.14402269134521484,0.14476449920654297,"[0.144949951171875, 0.1412518768310547, 0.1397279357910156, 0.13956182861328126, 0.13982972717285155, 0.13892410278320313, 0.13987677001953125, 0.14288937377929686, 0.1413212127685547, 0.14216256713867187]",tokens/s,1814.9652227737006,kWh,1.6439394082552122e-06,9.00797525271931e-07,6.874183740083411e-06,9.418920673610555e-06,tokens/kWh,27179334.96533712,MB,1868.201984,3349.676032,0.0,2703.228928,2667.098624,s,10,83.44500195312501,8.3445001953125,0.03302115511867202,8.3336259765625,8.37593193359375,8.403280419921876,8.425159208984375,"[8.33203515625, 8.335076171875, 8.333888671875, 8.3144501953125, 8.32368359375, 8.3150146484375, 8.3698544921875, 8.43062890625, 8.3570068359375, 8.33336328125]",tokens/s,7.549882979856609,kWh,9.832901556366757e-05,5.38916025818177e-05,0.0003962753054459165,0.0005484959235914018,tokens/kWh,114859.55918777513,,s,629,84.57831109619147,0.13446472352335676,0.016756456539684012,0.13199974060058595,0.1339588592529297,0.13428590698242188,0.27203026611328124,"[0.131915771484375, 0.13208883666992188, 0.13340570068359375, 0.13261517333984374, 0.1320437774658203, 0.13189222717285157, 0.13191372680664062, 0.13180621337890625, 0.13191270446777345, 0.1318174743652344, 0.13188607788085938, 0.13165977478027344, 0.13220761108398438, 0.13279539489746095, 0.132642822265625, 0.1319086151123047, 0.13183590698242187, 0.13183692932128907, 0.13191372680664062, 0.1319710693359375, 0.13185536193847655, 0.13166387939453125, 0.13191888427734375, 0.13172015380859375, 0.1317058563232422, 0.13175704956054687, 0.1318830108642578, 0.13170687866210937, 0.1325506591796875, 0.13195468139648436, 0.13218412780761718, 0.13190444946289062, 0.1321625671386719, 0.13238067626953126, 0.13378150939941405, 0.13233255004882813, 0.13199667358398437, 0.13365965270996094, 0.13226495361328125, 0.13205401611328124, 0.13234483337402345, 0.13211033630371094, 0.13187481689453126, 0.13180210876464843, 0.13335859680175782, 0.13234176635742187, 0.13278311157226563, 0.13272679138183593, 0.1324042205810547, 0.13250457763671875, 0.13250662231445312, 0.13336883544921874, 0.13242880249023437, 0.13245030212402345, 0.13245234680175783, 0.13265408325195313, 0.1332162628173828, 0.13244825744628907, 0.1322782745361328, 0.13222093200683593, 0.13194239807128907, 0.1318461456298828, 0.2743060607910156, 0.13452493286132813, 0.13292338562011718, 0.13269094848632812, 0.13221580505371094, 0.13191372680664062, 0.13204888916015625, 0.1319393310546875, 0.13207244873046875, 0.13193624877929688, 0.1332346954345703, 0.13184307861328126, 0.13165977478027344, 0.13174169921875, 0.1319331817626953, 0.13257011413574218, 0.13245234680175783, 0.13182669067382813, 0.13220249938964843, 0.1319772186279297, 0.13190451049804688, 0.13176934814453126, 0.13176832580566405, 0.13202943420410157, 0.131842041015625, 0.13187992858886718, 0.13245132446289062, 0.1319014434814453, 0.13174374389648438, 0.13188096618652342, 0.1329971160888672, 0.1325875244140625, 0.1321922607421875, 0.13208473205566407, 0.1319751739501953, 0.13210829162597656, 0.132173828125, 0.1320202178955078, 0.13244108581542968, 0.1323520050048828, 0.13193624877929688, 0.1318707275390625, 0.13215335083007812, 0.1319833526611328, 0.13199871826171874, 0.13258546447753905, 0.1363056640625, 0.13410917663574218, 0.132347900390625, 
0.13224557495117187, 0.13346809387207031, 0.1320499267578125, 0.1319823303222656, 0.13196595764160157, 0.13193113708496093, 0.13199974060058595, 0.13258444213867188, 0.13302787780761718, 0.13199562072753906, 0.13191885375976561, 0.13196493530273437, 0.1318461456298828, 0.13196800231933595, 0.27239935302734375, 0.1318348846435547, 0.13182975769042968, 0.13207449340820313, 0.13185638427734375, 0.1317724151611328, 0.13244415283203126, 0.13195263671875, 0.1320099792480469, 0.13171302795410156, 0.13188505554199217, 0.13182054138183594, 0.13208575439453124, 0.1338101806640625, 0.13198439025878905, 0.13201919555664063, 0.13190348815917968, 0.13199871826171874, 0.13165977478027344, 0.13169357299804688, 0.13205708312988282, 0.13223014831542967, 0.13255679321289063, 0.13336166381835937, 0.1323274230957031, 0.1321175079345703, 0.13262745666503906, 0.13243597412109376, 0.13281074523925782, 0.13219737243652344, 0.13270425415039064, 0.1319403839111328, 0.13225782775878905, 0.13328172302246094, 0.13259266662597657, 0.13259056091308594, 0.13216152954101562, 0.1320622100830078, 0.13242469787597655, 0.1341696014404297, 0.13195263671875, 0.13167718505859374, 0.13170278930664062, 0.13178675842285156, 0.13180108642578126, 0.13196902465820312, 0.13176319885253907, 0.1331517791748047, 0.1319188232421875, 0.13173248291015624, 0.13183795166015624, 0.13228440856933593, 0.1320273895263672, 0.13176934814453126, 0.13183897399902345, 0.13179600524902343, 0.13171708679199218, 0.13180108642578126, 0.13352243041992187, 0.1350635528564453, 0.13252915954589845, 0.13388493347167968, 0.1324451904296875, 0.2718658447265625, 0.1323663330078125, 0.1318041534423828, 0.1317375946044922, 0.13224755859375, 0.13172837829589842, 0.13183692932128907, 0.13180825805664062, 0.13196493530273437, 0.13201612854003905, 0.13178060913085937, 0.13186866760253907, 0.13176422119140624, 0.13189222717285157, 0.13185331726074218, 0.13174476623535156, 0.13189631652832032, 0.1319833526611328, 0.13183180236816405, 0.13173965454101563, 0.13178880310058594, 0.13191474914550783, 0.13178163146972657, 0.13190553283691406, 0.132674560546875, 0.13195161437988281, 0.13180313110351563, 0.13180108642578126, 0.13176524353027344, 0.13174578857421876, 0.1317232666015625, 0.13185740661621093, 0.1315359344482422, 0.13187271118164062, 0.13189427185058594, 0.13176217651367186, 0.13183999633789062, 0.131842041015625, 0.13198028564453124, 0.1318461456298828, 0.13185740661621093, 0.13189222717285157, 0.1318656005859375, 0.13177548217773438, 0.13433549499511718, 0.1318778839111328, 0.13178163146972657, 0.1320099792480469, 0.13189529418945312, 0.13173043823242186, 0.13199974060058595, 0.1317908477783203, 0.13167718505859374, 0.1318707580566406, 0.1319085693359375, 0.1320079345703125, 0.1317375946044922, 0.1329776611328125, 0.13234278869628907, 0.13224960327148438, 0.13266841125488282, 0.13238578796386719, 0.13234994506835937, 0.2727505798339844, 0.13219532775878906, 0.1322977294921875, 0.1321318359375, 0.13251072692871094, 0.13448908996582032, 0.13275852966308593, 0.1344153594970703, 0.13294796752929688, 0.1335828552246094, 0.13194137573242187, 0.1318604736328125, 0.13178469848632812, 0.13185023498535156, 0.13183282470703125, 0.13243084716796874, 0.13204582214355468, 0.13287628173828125, 0.13193522644042968, 0.13247999572753907, 0.13198028564453124, 0.13178880310058594, 0.1318225860595703, 0.13189529418945312, 0.1317969970703125, 0.1317898254394531, 0.13174783325195313, 0.13180825805664062, 0.1317611541748047, 0.13181951904296876, 0.13173043823242186, 0.1318901824951172, 
0.1316822967529297, 0.1317908477783203, 0.13169664001464843, 0.1318656005859375, 0.1333289031982422, 0.1319536590576172, 0.13187890625, 0.13186457824707032, 0.13181951904296876, 0.1317969970703125, 0.1317580871582031, 0.13171200561523438, 0.13176934814453126, 0.13164851379394532, 0.13176524353027344, 0.13194239807128907, 0.13188812255859375, 0.13191270446777345, 0.1322117156982422, 0.13192909240722656, 0.13183999633789062, 0.1317693786621094, 0.13185020446777343, 0.13187583923339843, 0.1317611541748047, 0.13403237915039062, 0.13205503845214844, 0.13186151123046874, 0.13183795166015624, 0.13185536193847655, 0.13172531127929688, 0.2711296081542969, 0.13194137573242187, 0.131852294921875, 0.13176217651367186, 0.13171609497070313, 0.13177548217773438, 0.13163827514648438, 0.1318164520263672, 0.1318707275390625, 0.13187277221679689, 0.13203762817382814, 0.13187174987792968, 0.13186968994140624, 0.13186866760253907, 0.13186972045898437, 0.13196592712402344, 0.1320099792480469, 0.1333217315673828, 0.13214207458496094, 0.13328793334960937, 0.13255679321289063, 0.13287628173828125, 0.1319505920410156, 0.13182566833496093, 0.13165158081054687, 0.13177650451660156, 0.13287423706054688, 0.13261311340332033, 0.13195878601074218, 0.1316505584716797, 0.1316495361328125, 0.13174989318847657, 0.13171302795410156, 0.13187686157226564, 0.13190553283691406, 0.13175296020507812, 0.13174989318847657, 0.13191474914550783, 0.13393820190429687, 0.13187989807128905, 0.13174681091308593, 0.1316864013671875, 0.13167205810546875, 0.13172019958496095, 0.13174783325195313, 0.1315952606201172, 0.13168435668945314, 0.13165362548828125, 0.13327769470214842, 0.13195161437988281, 0.1319024963378906, 0.1318594207763672, 0.13180313110351563, 0.13170381164550782, 0.13171507263183593, 0.13190451049804688, 0.13232640075683594, 0.13192294311523436, 0.13172940063476563, 0.1317406768798828, 0.13174578857421876, 0.13170381164550782, 0.13169561767578125, 0.2720942077636719, 0.1323653106689453, 0.1319772186279297, 0.13174887084960937, 0.1318461456298828, 0.13169049072265626, 0.13180825805664062, 0.13170994567871094, 0.13199974060058595, 0.13190348815917968, 0.13172940063476563, 0.13172735595703125, 0.13171612548828124, 0.13159523010253907, 0.13188914489746092, 0.13179904174804688, 0.13198130798339844, 0.13195578002929687, 0.13164845275878906, 0.13183590698242187, 0.13194752502441406, 0.13176524353027344, 0.13180621337890625, 0.13325619506835937, 0.1319086456298828, 0.13182666015625, 0.13163929748535155, 0.13185125732421876, 0.1320396728515625, 0.13438668823242186, 0.13385317993164061, 0.13466111755371094, 0.13423411560058593, 0.133897216796875, 0.1338357696533203, 0.1337620849609375, 0.13375177001953126, 0.1338419189453125, 0.1338470458984375, 0.13377127075195314, 0.13373440551757812, 0.1337507781982422, 0.13362892150878905, 0.13362687683105468, 0.133718017578125, 0.1337139129638672, 0.13433139038085937, 0.1335930938720703, 0.13229158020019532, 0.13321932983398438, 0.1331998748779297, 0.1333289031982422, 0.13277183532714842, 0.13196185302734376, 0.13300531005859376, 0.13356031799316406, 0.13304013061523437, 0.13298892211914062, 0.1336432647705078, 0.13445120239257813, 0.13455258178710938, 0.1344040985107422, 0.13423922729492188, 0.27732583618164064, 0.1339084777832031, 0.13382144165039062, 0.1346867218017578, 0.13416653442382812, 0.13392076110839843, 0.13295001220703126, 0.13354495239257813, 0.13410508728027343, 0.1338173370361328, 0.13408869934082032, 0.1340200958251953, 0.1339043884277344, 0.13345074462890624, 0.1340712890625, 
0.133653564453125, 0.1348218231201172, 0.13366886901855468, 0.13357466125488282, 0.1335029754638672, 0.1333729248046875, 0.13342617797851564, 0.1340518341064453, 0.1341563262939453, 0.13418496704101562, 0.13429244995117187, 0.13391769409179688, 0.13419314575195312, 0.13331149291992186, 0.13398121643066407, 0.1340333709716797, 0.13403135681152345, 0.13413682556152343, 0.13416447448730467, 0.1341071319580078, 0.13356748962402343, 0.13255885314941407, 0.1329449005126953, 0.13408767700195312, 0.13382655334472657, 0.13446556091308592, 0.134451171875, 0.1342740478515625, 0.13432524108886718, 0.13397196960449217, 0.133928955078125, 0.13388800048828126, 0.13408154296875, 0.1339463653564453, 0.13414707946777343, 0.1341204528808594, 0.1346938934326172, 0.13456895446777345, 0.13381427001953125, 0.13382553100585937, 0.13402316284179688, 0.13395558166503907, 0.13448602294921874, 0.13234994506835937, 0.13215437316894532, 0.13213081359863282, 0.13221376037597657, 0.13244825744628907, 0.27383807373046876, 0.13208677673339844, 0.1318748779296875, 0.13186143493652344, 0.1319342041015625, 0.1316986846923828, 0.13210009765625, 0.13194956970214844, 0.1318338623046875, 0.13179904174804688, 0.13189427185058594, 0.1324390411376953, 0.1331261444091797, 0.13414399719238282, 0.13440205383300782, 0.13397196960449217, 0.13388394165039064, 0.13371900939941406, 0.13379379272460937, 0.1336944580078125, 0.13435395812988282, 0.133012451171875, 0.1330391082763672, 0.13337496948242186, 0.13211033630371094, 0.1332316131591797, 0.1331374053955078, 0.1329827880859375, 0.13283839416503906, 0.13333401489257812, 0.1330401611328125, 0.1329510040283203, 0.13266636657714845, 0.132642822265625, 0.1325813751220703, 0.1328547821044922, 0.13313536071777343, 0.13244313049316406, 0.13259365844726562, 0.13253018188476562, 0.1329459228515625, 0.13234585571289062, 0.13299200439453124, 0.13250764465332032, 0.13244210815429688, 0.13175296020507812, 0.1317580871582031, 0.1318338623046875, 0.1318666229248047, 0.1317580871582031, 0.13186358642578125, 0.13192803955078125, 0.13181849670410156, 0.13414501953125, 0.13427609252929687, 0.13285580444335937, 0.13174476623535156, 0.1318656005859375, 0.13187277221679689, 0.13214002990722656, 0.13324185180664064, 0.13186151123046874, 0.13166592407226563, 0.27445761108398437, 0.13168025207519532, 0.1318492126464844, 0.13196595764160157, 0.13203353881835939, 0.13289573669433594, 0.13374771118164064, 0.13365350341796875, 0.13377433776855469, 0.13357772827148437, 0.13372621154785155, 0.1335900115966797, 0.1336494140625, 0.13360946655273437, 0.1339095001220703, 0.13365863037109374, 0.13385317993164061, 0.1333053436279297, 0.13203660583496094, 0.13213388061523437, 0.13183897399902345, 0.132063232421875, 0.13191372680664062, 0.13187992858886718, 0.13165875244140626, 0.1317560272216797, 0.13234994506835937, 0.13201510620117188, 0.13194342041015625, 0.13241856384277345, 0.13182566833496093, 0.1321871337890625, 0.13201715087890625, 0.13212979125976562, 0.13185331726074218, 0.13186764526367187, 0.1317969970703125, 0.13172940063476563, 0.13185433959960938, 0.1317959747314453, 0.13192909240722656, 0.1318144073486328, 0.1319772186279297, 0.13174783325195313, 0.13176422119140624, 0.13171916198730468, 0.13182464599609375, 0.1319086151123047, 0.13198745727539063, 0.13206431579589845, 0.13207244873046875, 0.1318757781982422, 0.13179391479492186, 0.13187379455566406, 0.13178060913085937, 0.1318113250732422, 0.13175196838378905, 0.13170889282226564, 0.13177754211425782, 0.13175091552734375, 0.1317611541748047, 
0.13186968994140624, 0.1329213409423828]",tokens/s,7.43689477654188,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-MoE-A2.7B,Qwen/Qwen1.5-MoE-A2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3640, in from_pretrained hf_quantizer.preprocess_model( File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 182, in preprocess_model return self._process_model_before_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 85, in _process_model_before_weight_loading model, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( [Previous line repeated 1 more time] File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 165, in replace_with_awq_linear model._modules[name] = target_cls( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemm.py"", line 102, in __init__ assert out_features % (32 // self.w_bit) == 0 AssertionError ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,v,v,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/v/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a338d-5234d48921a93a5d600bbd8f;9482e905-913f-4760-9097-cf49709e2b7f) Repository Not Found for url: https://huggingface.co/v/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: v is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-110B,Qwen/Qwen1.5-110B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in 
run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-2-12b,stabilityai/stablelm-2-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.1,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2280.779776,9584.508928,0.0,8938.061824,8629.0688,s,10,10.176551086425778,1.0176551086425778,0.0015328797671352301,1.0172308959960938,1.018193273925781,1.0201789428710937,1.0217674780273438,"[1.0221646118164063, 1.0172261352539063, 1.0167738647460938, 1.0172377319335937, 1.0172356567382812, 1.0169893188476562, 1.0167650146484375, 1.017517822265625, 1.016888916015625, 1.0177520141601561]",tokens/s,251.55870375521562,kWh,1.2027085920174918e-05,6.590293178160209e-06,5.696557335020391e-05,7.558295244853905e-05,tokens/kWh,3387007.145219666,MB,2281.074688,9584.508928,0.0,8938.061824,8715.66592,s,10,595.0380507812499,59.50380507812499,0.008158332777429248,59.504548828124996,59.5143921875,59.51552421875,59.51642984375,"[59.500625, 59.49740234375, 59.49325390625, 59.50875, 59.50894140625, 59.514140625, 59.4951328125, 59.50847265625, 59.49467578125, 59.51665625]",tokens/s,1.0587558210316923,kWh,0.0007025720743172699,0.00038506887292403917,0.0033221314077029936,0.004409772354944303,tokens/kWh,14286.451755125965,,s,629,603.2482678222655,0.9590592493199772,0.12088183404247937,0.9444280395507813,0.945362939453125,0.9457379272460937,1.96203095703125,"[0.9439191284179688, 0.9438074951171875, 0.9435494384765625, 0.9451939697265626, 0.9448151245117188, 0.9447935791015625, 0.94410546875, 0.9439744262695312, 0.9437614135742187, 0.9440634765625, 0.94390576171875, 0.9446031494140625, 0.9444812622070312, 0.945565673828125, 0.9449103393554688, 0.9443286743164062, 0.9433487548828124, 0.9437614135742187, 0.943837158203125, 0.9445468139648437, 0.9442355346679687, 0.9440911254882812, 0.9451141357421875, 0.9447239379882812, 0.9452031860351563, 0.9445714111328125, 0.9450618896484375, 0.9444659423828125, 0.9439969482421875, 0.9436231689453125, 0.944216064453125, 0.9437163696289063, 0.9447649536132813, 0.9448642578125, 0.9447239379882812, 0.9447444458007812, 0.944990234375, 0.9443727416992187, 0.9441873779296875, 0.9447096557617187, 0.9451028442382813, 0.9457059936523438, 0.9445437622070313, 0.9448652954101563, 0.9439764404296875, 0.945954833984375, 0.9447557373046875, 0.9458032836914062, 0.9443123168945312, 0.9440501708984375, 0.9439887084960937, 0.9450444946289063, 0.9437235107421875, 0.944195556640625, 0.9443512573242188, 0.9439027099609375, 0.9452513427734375, 0.9451724853515625, 0.94409521484375, 0.9442816162109375, 0.9435177001953124, 0.943952880859375, 1.961964599609375, 0.9452953491210937, 0.9439201049804687, 0.9446041870117188, 0.9441761474609375, 0.9450936279296875, 0.9438955688476562, 0.944521240234375, 0.9442867431640625, 0.944669677734375, 0.9439436645507813, 0.9444669189453125, 0.9447761840820312, 0.9443717041015625, 0.9457489624023437, 0.9448304443359375, 0.9437224731445313, 0.943720458984375, 0.943825927734375, 0.9439569702148437, 0.9447649536132813, 
0.9438760986328125, 0.94512744140625, 0.9446553344726563, 0.9442754516601563, 0.9448980712890624, 0.944395263671875, 0.9436589965820312, 0.9441065063476562, 0.9436221313476563, 0.94472705078125, 0.9441556396484375, 0.9437644653320313, 0.94514892578125, 0.9441781616210938, 0.9451366577148438, 0.943805419921875, 0.9449574584960938, 0.94401025390625, 0.9444935913085938, 0.9444280395507813, 0.9449482421875, 0.9444423828125, 0.944490478515625, 0.9455534057617188, 0.9445355224609375, 0.944510986328125, 0.9453905639648438, 0.944805908203125, 0.9447341918945312, 0.9445673217773437, 0.9435391845703125, 0.94436865234375, 0.9436989135742188, 0.9443194580078125, 0.9437255859375, 0.943572998046875, 0.9442672729492188, 0.9445857543945313, 0.9441392822265625, 0.9437747192382813, 0.9438228759765624, 0.9449410400390625, 1.9620567626953125, 0.94470556640625, 0.9442181396484375, 0.945459228515625, 0.944817138671875, 0.944289794921875, 0.9441679077148437, 0.943963134765625, 0.9437173461914062, 0.9446461181640625, 0.9440194702148438, 0.9441679077148437, 0.9439436645507813, 0.94363134765625, 0.9440819091796875, 0.9446000366210937, 0.9443461303710937, 0.9433681640625, 0.9446809692382813, 0.945306640625, 0.9444782104492188, 0.9442139892578125, 0.9447669677734375, 0.9443020629882812, 0.9446246337890625, 0.9445652465820312, 0.9445877685546875, 0.94449560546875, 0.9440122680664063, 0.9438228759765624, 0.9451950073242188, 0.9438597412109375, 0.9438197631835937, 0.9448263549804687, 0.9439232177734375, 0.9453045654296875, 0.9447987060546875, 0.944822265625, 0.944021484375, 0.9442969360351563, 0.9446051635742188, 0.945090576171875, 0.9438689575195313, 0.9441822509765625, 0.9437224731445313, 0.9438904418945312, 0.9443860473632812, 0.9447403564453125, 0.944142333984375, 0.9441279907226563, 0.94382080078125, 0.9441802368164063, 0.9439436645507813, 0.9441648559570313, 0.9440030517578125, 0.9440491333007812, 0.944716796875, 0.9447926025390625, 0.9442672729492188, 0.9440327758789062, 0.9443020629882812, 0.944078857421875, 0.9440512084960937, 1.962303466796875, 0.9448621826171875, 0.9440481567382812, 0.945122314453125, 0.9454274291992187, 0.9450147705078125, 0.9446143798828125, 0.9449840698242188, 0.9448417358398438, 0.94497998046875, 0.9441136474609375, 0.9453189086914062, 0.9442518920898437, 0.9458114624023437, 0.9444659423828125, 0.9440430297851562, 0.943193115234375, 0.9432238159179688, 0.9438320922851563, 0.9455195922851563, 0.9445857543945313, 0.9451468505859375, 0.9443491821289063, 0.9451049194335938, 0.9449103393554688, 0.945438720703125, 0.9439078369140625, 0.9441853637695312, 0.9438812255859375, 0.9447454833984374, 0.9449072875976563, 0.9441033935546875, 0.9460459594726562, 0.9442908325195313, 0.9438238525390625, 0.9456240844726562, 0.9443204956054687, 0.9439620971679688, 0.9440112915039063, 0.944047119140625, 0.9453885498046875, 0.94415771484375, 0.9452503051757812, 0.9443839721679688, 0.9442600708007812, 0.9446881103515625, 0.9450864868164063, 0.944437255859375, 0.9439979248046875, 0.94411572265625, 0.9444382934570312, 0.9443983154296876, 0.9445570678710937, 0.9448038330078125, 0.944584716796875, 0.9450680541992188, 0.9447341918945312, 0.944616455078125, 0.9441351928710937, 0.9442734375, 0.944194580078125, 0.9450393676757812, 0.9440450439453125, 1.96240380859375, 0.944021484375, 0.9450997924804687, 0.9445232543945312, 0.944395263671875, 0.9442621459960937, 0.9440450439453125, 0.9442600708007812, 0.9443963012695312, 0.944542724609375, 0.9447127075195313, 0.944289794921875, 0.9443440551757812, 0.9460879516601562, 
0.9445314331054687, 0.944732177734375, 0.9441167602539062, 0.9434654541015625, 0.9446461181640625, 0.9446021118164063, 0.9455134887695312, 0.94460107421875, 0.944163818359375, 0.9440798950195313, 0.9460429077148438, 0.9448857421875, 0.9443890991210937, 0.944552978515625, 0.9442201538085937, 0.945523681640625, 0.9453793334960937, 0.946145263671875, 0.9447127075195313, 0.9450925903320313, 0.9457950439453126, 0.9460930786132813, 0.9446625366210938, 0.9440460815429688, 0.9442805786132813, 0.9451847534179687, 0.9439201049804687, 0.9439293212890625, 0.9453352661132812, 0.9441095581054687, 0.9442600708007812, 0.9441249389648437, 0.9455513305664063, 0.9446041870117188, 0.9449758911132813, 0.944733154296875, 0.9457428588867187, 0.9441658935546875, 0.9438597412109375, 0.9436641235351563, 0.94344189453125, 0.945201171875, 0.9446563720703125, 0.9440634765625, 0.9435924682617187, 0.9437214965820313, 0.9441249389648437, 0.9439600830078125, 0.94362109375, 1.9618406982421874, 0.9449922485351563, 0.9445714111328125, 0.946429931640625, 0.9437757568359375, 0.9441884155273438, 0.943705078125, 0.94535986328125, 0.9445396728515625, 0.9449246826171875, 0.943847412109375, 0.9441607666015625, 0.9455728759765625, 0.9447864379882812, 0.9441095581054687, 0.9441699829101563, 0.9446051635742188, 0.9435985717773437, 0.9450465087890625, 0.9443768310546875, 0.9445549926757812, 0.944733154296875, 0.9450895385742187, 0.9451468505859375, 0.945554443359375, 0.9442498779296875, 0.9442662353515625, 0.9444259643554688, 0.945138671875, 0.9445048217773437, 0.9450762329101563, 0.944775146484375, 0.9441412963867187, 0.9443184814453125, 0.945238037109375, 0.9445673217773437, 0.9456680908203124, 0.9446410522460937, 0.9445181274414063, 0.9456373901367188, 0.9438515014648438, 0.9446522827148438, 0.9439600830078125, 0.944447509765625, 0.9449236450195313, 0.9451837158203125, 0.944015380859375, 0.944595947265625, 0.9445867309570313, 0.944869384765625, 0.9447772216796875, 0.9439764404296875, 0.9448857421875, 0.9445509033203126, 0.9448018188476562, 0.9448990478515625, 0.94510693359375, 0.944690185546875, 0.9443369140625, 0.9444546508789062, 0.9454674072265625, 0.9442078857421875, 0.9450659790039062, 1.9620792236328124, 0.9442550048828126, 0.9450751953125, 0.9438463745117187, 0.9439702758789063, 0.944837646484375, 0.9442098999023437, 0.9442938842773437, 0.9439365234375, 0.94470556640625, 0.9441126098632813, 0.944058349609375, 0.9444188232421875, 0.9445037841796875, 0.943837158203125, 0.9439406127929687, 0.9434613647460938, 0.9443983154296876, 0.944353271484375, 0.9448919067382813, 0.9447465209960938, 0.9449727783203125, 0.9444126586914062, 0.9454131469726562, 0.945016845703125, 0.9439498291015626, 0.9441412963867187, 0.9441597290039062, 0.9443522338867187, 0.9438381958007812, 0.9442887573242188, 0.944189453125, 0.9438013305664062, 0.9440122680664063, 0.9451192016601563, 0.9441802368164063, 0.944279541015625, 0.9441546020507813, 0.944953369140625, 0.9451735229492187, 0.9439866943359375, 0.9443461303710937, 0.9438648071289063, 0.9442754516601563, 0.9446963500976563, 0.944337890625, 0.9436928100585937, 0.9441341552734375, 0.9442744140625, 0.9443624877929687, 0.9439539184570312, 0.9444546508789062, 0.9440614624023438, 0.9443717041015625, 0.9440122680664063, 0.9457305297851563, 0.9445120239257813, 0.9440767822265625, 0.9440235595703125, 0.944637939453125, 0.9447669677734375, 0.9439303588867187, 0.9450260620117188, 1.96250830078125, 0.945138671875, 0.94508544921875, 0.9441658935546875, 0.944089111328125, 0.9439273071289063, 0.944532470703125, 
0.9449574584960938, 0.9450444946289063, 0.9448878173828125, 0.9445734252929687, 0.9452164916992187, 0.9463602905273437, 0.9454663696289063, 0.9445283813476563, 0.944996337890625, 0.9449359130859375, 0.9434193725585938, 0.9439549560546875, 0.9449943237304688, 0.9439518432617188, 0.9447526245117187, 0.94417919921875, 0.9459722290039062, 0.943705078125, 0.9437808837890626, 0.9436303100585938, 0.945122314453125, 0.9443286743164062, 0.9444597778320313, 0.9451847534179687, 0.944753662109375, 0.9440460815429688, 0.9454346313476563, 0.9441884155273438, 0.9441730346679688, 0.9445990600585937, 0.9442191162109375, 0.9447506103515625, 0.94445361328125, 0.9448314819335938, 0.9444966430664062, 0.9454448852539062, 0.9437501220703125, 0.9454315795898437, 0.943909912109375, 0.9438177490234375, 0.9439487915039062, 0.944637939453125, 0.944753662109375, 0.9441044311523438, 0.9440993041992187, 0.9442928466796875, 0.9440347900390625, 0.9455585327148438, 0.9453936767578125, 0.9444710693359375, 0.9441751098632812, 0.944163818359375, 0.9446604614257812, 0.9442406616210938, 0.9449768676757813, 0.9444280395507813, 1.9621160888671876, 0.9457469482421875, 0.94449560546875, 0.9444403076171874, 0.9441771240234375, 0.9445068969726562, 0.9441464233398438, 0.9440808715820312, 0.9447423706054687, 0.9440921630859375, 0.9448734741210938, 0.944405517578125, 0.945375244140625, 0.944026611328125, 0.9439928588867188, 0.9442150268554688, 0.9443993530273438, 0.9442857055664062, 0.9451458740234375, 0.9446522827148438, 0.944701416015625, 0.9448027954101562, 0.9459158935546875, 0.9446707153320313, 0.9441566772460938, 0.9440133056640625, 0.9440880737304688, 0.944232421875, 0.9440133056640625, 0.9441699829101563, 0.9440634765625, 0.9437409057617188, 0.943952880859375, 0.944626708984375, 0.944152587890625, 0.9444495239257813, 0.9439201049804687, 0.945048583984375, 0.9437726440429688, 0.9444515991210938, 0.94368359375, 0.9439324340820312, 0.9438914794921875, 0.9448427734375, 0.9442590942382812, 0.9434368286132813, 0.9441812744140625, 0.9437726440429688, 0.9446533203125, 0.9439723510742187, 0.9444556884765625, 0.9440726928710937, 0.9439303588867187, 0.94407373046875, 0.9452779541015625, 0.9441228637695313, 0.9435852661132812, 0.9436907348632813, 0.943984619140625, 0.944078857421875, 0.9446338500976562, 0.9449257202148438, 0.9452236938476563, 1.963953125, 0.9454633178710937, 0.9447341918945312, 0.9460571899414062, 0.94517041015625, 0.9456434936523438, 0.9455032348632812, 0.9461248168945312, 0.9460029296875, 0.9438279418945312, 0.9447096557617187, 0.94533837890625, 0.94419970703125, 0.943636474609375, 0.943752197265625, 0.9444884643554687, 0.9433948364257813, 0.9433794555664062, 0.9446205444335938, 0.9441167602539062, 0.9440307006835937, 0.9440122680664063, 0.9449257202148438, 0.9440286865234375, 0.943515625, 0.9437122802734375, 0.9445457763671875, 0.9440726928710937, 0.9442130126953125, 0.9443328247070313, 0.9440071411132812, 0.9451980590820312, 0.9459169311523438, 0.944890869140625, 0.9443102416992187, 0.9445673217773437, 0.9445437622070313, 0.9452554321289063, 0.9446389770507813, 0.9451735229492187, 0.9450977172851562, 0.944343017578125, 0.9447926025390625, 0.9471918334960937, 0.9450096435546875, 0.9446543579101563, 0.9449779052734375, 0.9458626708984375, 0.9440061645507812, 0.9450895385742187, 0.945090576171875, 0.9444638671875, 0.9445816040039062, 0.944679931640625, 0.9448161010742188, 0.944236572265625, 0.9442642211914063, 0.9439815673828125, 0.9447567138671875, 0.944205810546875, 0.9459619750976562, 0.9442539672851562, 
0.9440655517578125]",tokens/s,1.0426884477773943,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.3b,EleutherAI/pythia-1.3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.0,,,,,,MB,1688.977408,2250.768384,0.0,1604.32128,1463.693312,s,10,1.2614907455444335,0.12614907455444335,0.001156305474696835,0.12596041870117186,0.12672372131347656,0.12802366485595704,0.12906361968994143,"[0.1293236083984375, 0.12533795166015624, 0.12527750396728515, 0.1264024658203125, 0.12550204467773438, 0.12507218933105468, 0.1257669143676758, 0.12615392303466796, 0.12621929931640624, 0.12643484497070312]",tokens/s,2029.3450499275416,kWh,1.4795928945144019e-06,8.107423014348568e-07,6.482848936275459e-06,8.773184132224718e-06,tokens/kWh,29179827.545131337,MB,1688.977408,2250.768384,0.0,1604.32128,1560.975872,s,10,72.8006865234375,7.280068652343751,0.00707557452467299,7.2794865722656255,7.288760400390625,7.290637524414063,7.292139223632812,"[7.2925146484375, 7.2867998046875, 7.28834326171875, 7.272900390625, 7.28120458984375, 7.28177783203125, 7.2702080078125, 7.2777685546875, 7.277201171875, 7.27196826171875]",tokens/s,8.653764546535935,kWh,8.591771873335045e-05,4.70891264273655e-05,0.0003703246191761246,0.0005033314643368406,tokens/kWh,125166.02768516574,,s,629,73.8150656280517,0.1173530455135958,0.015000349665490152,0.11545600128173829,0.11595755310058595,0.11627356262207031,0.24130564147949218,"[0.1175910415649414, 0.11764224243164062, 0.11673804473876953, 0.11585126495361328, 0.115281982421875, 0.11545388793945313, 0.11590860748291015, 0.11528498840332031, 0.11539968109130859, 0.11538841247558594, 0.11548159790039063, 0.11523072052001954, 0.11521842956542969, 0.11532492828369141, 0.11577958679199218, 0.11560755157470703, 0.1152696304321289, 0.11566079711914062, 0.1159925765991211, 0.11540889739990234, 0.11546623992919922, 0.11522150421142578, 0.11538944244384766, 0.1153617935180664, 0.11544371032714844, 0.1166714859008789, 0.11574886322021484, 0.1154672622680664, 0.11605299377441407, 0.11584102630615234, 0.1156485137939453, 0.11544882965087891, 0.11554099273681641, 0.115557373046875, 0.11551026916503906, 0.1155788803100586, 0.115725341796875, 0.11555939483642579, 0.11551129913330078, 0.11544371032714844, 0.11543244934082031, 0.11539968109130859, 0.11540684509277344, 0.11523072052001954, 0.11638988494873047, 0.11598438262939453, 0.11537510681152344, 0.11538534545898438, 0.11558604431152343, 0.11559219360351562, 0.11591474914550781, 0.11588198089599609, 0.1159362564086914, 0.11570175933837891, 0.11595673370361329, 0.11578368377685547, 0.11646463775634766, 0.11617485046386719, 0.11598336029052735, 0.11553279876708984, 0.11578470611572265, 0.11579801940917969, 0.24151040649414063, 0.11573862457275391, 0.11605094146728516, 0.11689881896972656, 0.1161523208618164, 0.1156648941040039, 0.11554099273681641, 0.11535564422607422, 0.11528498840332031, 0.11527065277099609, 0.11566387176513672, 0.11533516693115234, 0.11576012420654297, 0.11564749145507812, 0.1155051498413086, 0.11542221069335938, 
0.11543961334228516, 0.11525529479980469, 0.11523993682861328, 0.11541913604736329, 0.1155225601196289, 0.1157734375, 0.11563929748535157, 0.11553587341308594, 0.11531571197509766, 0.115378173828125, 0.11524201965332032, 0.11551340484619141, 0.11532073974609375, 0.11577961730957032, 0.11592704010009766, 0.11556451416015626, 0.1154549789428711, 0.11527680206298828, 0.11526656341552734, 0.11538739013671875, 0.11530854034423828, 0.11538432312011719, 0.11546214294433593, 0.11541401672363281, 0.11528192138671875, 0.11616563415527344, 0.11571199798583984, 0.11578880310058594, 0.11551747131347656, 0.11540067291259766, 0.11685273742675781, 0.11557068634033203, 0.11726950073242187, 0.1159393310546875, 0.11543148803710937, 0.11552044677734374, 0.1161891860961914, 0.11548876953125, 0.11572940826416016, 0.1157396469116211, 0.11657421112060547, 0.11594854736328125, 0.11598851013183593, 0.11593417358398438, 0.11551846313476563, 0.11548365020751954, 0.11548569488525391, 0.2411530303955078, 0.11556454467773437, 0.11577139282226563, 0.11626496124267578, 0.11560652923583985, 0.11565161895751953, 0.11566384124755859, 0.11581747436523437, 0.1154672622680664, 0.11527474975585937, 0.11536998748779297, 0.11562598419189453, 0.11528089904785156, 0.11566902160644531, 0.11589116668701172, 0.11617894744873047, 0.1157201919555664, 0.11589734649658204, 0.11571302032470702, 0.11588607788085938, 0.11542323303222657, 0.11556966400146484, 0.1156833267211914, 0.11643392181396485, 0.11568434906005859, 0.11659468841552735, 0.11596083068847657, 0.11616665649414062, 0.11567922973632813, 0.11553794860839844, 0.11543036651611328, 0.11538636779785157, 0.11580620574951171, 0.11554918670654298, 0.11579296112060547, 0.1156412811279297, 0.11582361602783203, 0.11581132507324218, 0.11555328369140624, 0.11571507263183593, 0.11545600128173829, 0.11533926391601562, 0.1156147232055664, 0.1153966064453125, 0.1154119644165039, 0.11549286651611328, 0.11582566070556641, 0.11649638366699219, 0.1159516830444336, 0.11595462036132813, 0.11600281524658203, 0.11591474914550781, 0.11610931396484375, 0.11565261077880859, 0.1153290252685547, 0.11547238159179687, 0.1153290252685547, 0.115346435546875, 0.11534438323974609, 0.11529523468017579, 0.115378173828125, 0.11543551635742187, 0.11540377807617187, 0.24315084838867188, 0.11575193786621094, 0.11506790161132813, 0.11524915313720703, 0.1155962905883789, 0.11531366729736328, 0.11553177642822265, 0.11558297729492187, 0.11519385528564453, 0.11537305450439453, 0.11545811462402343, 0.11599967956542968, 0.11572838592529297, 0.1153966064453125, 0.11536077117919921, 0.11558092498779297, 0.1153617935180664, 0.1152911376953125, 0.11529011535644532, 0.11535564422607422, 0.11525939178466797, 0.1152143325805664, 0.11515494537353516, 0.11549696350097656, 0.11525017547607422, 0.11560550689697266, 0.11576217651367188, 0.11544268798828125, 0.11537203216552734, 0.11532288360595704, 0.11530239868164062, 0.11551334381103516, 0.115162109375, 0.11524813079833984, 0.1151436767578125, 0.11530035400390624, 0.11520614624023437, 0.11535871887207032, 0.11520716857910156, 0.11560345458984375, 0.11539968109130859, 0.11531676483154298, 0.11529417419433594, 0.1153966064453125, 0.11623017883300782, 0.11542422485351563, 0.11526451110839844, 0.11549388885498046, 0.11539762878417968, 0.11577855682373046, 0.1153955841064453, 0.11553279876708984, 0.11524403381347656, 0.11579084777832031, 0.11539968109130859, 0.11569152069091797, 0.11539353942871093, 0.11553997039794922, 0.11545600128173829, 0.11532492828369141, 0.1152696304321289, 
0.11538841247558594, 0.1153433609008789, 0.241364990234375, 0.11547853088378907, 0.11533721923828125, 0.11539049530029297, 0.11531874847412109, 0.11551129913330078, 0.1152542724609375, 0.11560758209228515, 0.11542422485351563, 0.1155389404296875, 0.11542118072509766, 0.11545804595947265, 0.1156178207397461, 0.115463134765625, 0.11520716857910156, 0.11564339447021485, 0.11541913604736329, 0.11587276458740234, 0.1153955841064453, 0.11544882965087891, 0.11543654632568359, 0.11536793518066406, 0.11598540496826172, 0.11562393951416015, 0.11578470611572265, 0.11597004699707031, 0.11561062622070313, 0.11569459533691406, 0.11571302032470702, 0.11590144348144531, 0.11557068634033203, 0.11549900817871094, 0.11544678497314453, 0.11544268798828125, 0.11540889739990234, 0.11547443389892578, 0.11561574554443359, 0.11543449401855468, 0.11550624084472656, 0.11566381072998047, 0.11555225372314454, 0.11549286651611328, 0.11550822448730469, 0.11547955322265625, 0.1153064956665039, 0.11546623992919922, 0.11551744079589844, 0.11544371032714844, 0.11622195434570312, 0.11589017486572266, 0.11552665710449218, 0.1153986587524414, 0.11532390594482422, 0.11578470611572265, 0.11576934051513672, 0.11560550689697266, 0.11554713439941407, 0.11664281463623047, 0.11574784088134765, 0.11553997039794922, 0.11533824157714843, 0.11548569488525391, 0.11580518341064452, 0.2409891815185547, 0.11542425537109376, 0.11530342102050781, 0.11544166564941406, 0.115346435546875, 0.11540684509277344, 0.11648614501953125, 0.1165475845336914, 0.11569561767578125, 0.11536281585693359, 0.11529318237304688, 0.11541401672363281, 0.11542221069335938, 0.11558809661865234, 0.11539456176757812, 0.11545702362060548, 0.11533209228515626, 0.11550822448730469, 0.11541913604736329, 0.11535667419433594, 0.11532185363769532, 0.11544985961914063, 0.11528300476074219, 0.11542829132080078, 0.11535564422607422, 0.11552665710449218, 0.11533004760742188, 0.11546419525146484, 0.11538022613525391, 0.11521539306640625, 0.1153545913696289, 0.11576934051513672, 0.11584614562988281, 0.11546214294433593, 0.11551232147216797, 0.11547647857666016, 0.11558092498779297, 0.1154796142578125, 0.11671033477783203, 0.11560243225097656, 0.11540480041503906, 0.11559120178222657, 0.11558499145507813, 0.11560243225097656, 0.11611443328857422, 0.11554815673828125, 0.11593113708496093, 0.11560857391357422, 0.11550003051757812, 0.1153433609008789, 0.11553488159179688, 0.11636220550537109, 0.11614924621582032, 0.11589631652832032, 0.11544371032714844, 0.11543961334228516, 0.11590144348144531, 0.11554303741455078, 0.11543961334228516, 0.11548467254638672, 0.1157918701171875, 0.11545804595947265, 0.11548159790039063, 0.24169778442382814, 0.11536077117919921, 0.11515801239013672, 0.1152573471069336, 0.11527884674072265, 0.11527986907958984, 0.115346435546875, 0.11558502197265624, 0.1152471694946289, 0.11550918579101563, 0.11519078063964844, 0.11538329315185547, 0.11518669128417969, 0.11536281585693359, 0.11518163299560547, 0.11539347076416015, 0.11518678283691407, 0.11522755432128906, 0.11527577972412109, 0.11537920379638672, 0.11528710174560547, 0.11573241424560547, 0.11531775665283203, 0.11599462127685548, 0.11536895751953125, 0.11559219360351562, 0.1152174072265625, 0.11544473266601563, 0.11515187072753906, 0.11589734649658204, 0.11575091552734375, 0.11540377807617187, 0.11524508666992188, 0.11525116729736327, 0.1152573471069336, 0.11555532836914062, 0.1153259506225586, 0.1153617935180664, 0.1152542724609375, 0.11547135925292969, 0.11532083129882813, 0.11537305450439453, 
0.11539968109130859, 0.11537612915039062, 0.11517235565185546, 0.11529216003417969, 0.11535257720947266, 0.11539046478271485, 0.11532390594482422, 0.11542940521240234, 0.11545801544189453, 0.115557373046875, 0.11530239868164062, 0.11545193481445312, 0.11529827117919922, 0.11565363311767578, 0.11541299438476563, 0.11608678436279297, 0.11551641845703126, 0.11544064331054688, 0.11531980895996094, 0.11546214294433593, 0.11526348876953126, 0.2421370849609375, 0.11553485107421875, 0.11565363311767578, 0.11698073577880859, 0.11526451110839844, 0.11523788452148437, 0.11507615661621094, 0.11531769561767578, 0.11525939178466797, 0.11527884674072265, 0.11663155364990234, 0.11554815673828125, 0.11534745788574219, 0.11536281585693359, 0.11533106994628907, 0.11543449401855468, 0.11581439971923828, 0.11606221008300781, 0.11575091552734375, 0.11566182708740234, 0.1154867172241211, 0.1154119644165039, 0.11538432312011719, 0.11553075408935547, 0.11544985961914063, 0.1154119644165039, 0.11534540557861328, 0.11543142700195312, 0.11537612915039062, 0.11558502197265624, 0.11556352233886719, 0.1155072021484375, 0.11545394897460938, 0.11533824157714843, 0.11553485107421875, 0.11541094207763672, 0.1153597412109375, 0.11537407684326172, 0.11529216003417969, 0.11547955322265625, 0.11540787506103516, 0.11534950256347656, 0.11540275573730469, 0.11537305450439453, 0.11569664001464844, 0.11560038757324219, 0.11557785797119141, 0.11525635528564453, 0.115210205078125, 0.11607859039306641, 0.11538022613525391, 0.11546419525146484, 0.11593727874755859, 0.11551436614990235, 0.11531263732910156, 0.11544576263427735, 0.11548569488525391, 0.11550924682617188, 0.11583999633789062, 0.11551641845703126, 0.11535871887207032, 0.11538739013671875, 0.11543142700195312, 0.24210124206542968, 0.11537920379638672, 0.11549593353271484, 0.11537612915039062, 0.11512525177001953, 0.11541709136962891, 0.1153280029296875, 0.11576729583740235, 0.115378173828125, 0.11518669128417969, 0.11519593811035156, 0.115463134765625, 0.11528498840332031, 0.11549491119384765, 0.11540172576904296, 0.11594445037841797, 0.11559526062011719, 0.116279296875, 0.11579698944091797, 0.11580210876464844, 0.11537203216552734, 0.11540991973876953, 0.11529318237304688, 0.11539456176757812, 0.1153597412109375, 0.11614412689208985, 0.11539968109130859, 0.11535155487060547, 0.11532492828369141, 0.11529523468017579, 0.11584108734130859, 0.1154241943359375, 0.11634585571289062, 0.11548159790039063, 0.11571302032470702, 0.1153219223022461, 0.1154579849243164, 0.11549388885498046, 0.1154303970336914, 0.11558297729492187, 0.11536383819580077, 0.11582975769042969, 0.11551436614990235, 0.11530035400390624, 0.11534233856201172, 0.11510886383056641, 0.11527168273925781, 0.11539968109130859, 0.11526860809326171, 0.1152573471069336, 0.11550822448730469, 0.11615952301025391, 0.1159669418334961, 0.11542835235595703, 0.11572223663330078, 0.11554918670654298, 0.11545906829833984, 0.11568742370605468, 0.1153812484741211, 0.11539762878417968, 0.11535155487060547, 0.1154303970336914, 0.11551129913330078, 0.24231832885742188, 0.11538432312011719, 0.1153259506225586, 0.11540480041503906, 0.11546419525146484, 0.11534745788574219, 0.11531878662109375, 0.11547138977050782, 0.115283935546875, 0.11556147003173828, 0.11574169921875, 0.11635916900634766, 0.11543142700195312, 0.11534540557861328, 0.11524813079833984, 0.1152911376953125, 0.11551436614990235, 0.11536589050292968, 0.11532083129882813, 0.11543247985839844, 0.11533615875244141, 0.11537407684326172, 0.11525631713867188, 0.11534438323974609, 
0.11512217712402344, 0.11535769653320313, 0.11603763580322266, 0.11543449401855468, 0.1153986587524414, 0.11541506958007812, 0.11541094207763672, 0.11551331329345703, 0.11517440032958984, 0.11537715148925781, 0.11538534545898438, 0.11531263732910156, 0.11526758575439452, 0.11542835235595703, 0.11535052490234375, 0.11555430603027343, 0.11530035400390624, 0.11539046478271485, 0.11545398712158203, 0.1154815673828125, 0.11535667419433594, 0.11539250946044922, 0.1153812484741211, 0.1153986587524414, 0.1155051498413086, 0.11556665802001953, 0.11569554901123047, 0.11560447692871094, 0.11522866821289063, 0.11546419525146484, 0.11527577972412109, 0.11578880310058594, 0.11543142700195312, 0.1154119644165039, 0.115346435546875, 0.11526553344726563, 0.11522560119628907, 0.11540480041503906, 0.11531673431396484]",tokens/s,8.5212956819612,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-7b-hf,meta-llama/Llama-2-7b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2001.69472,5480.382464,0.0,4833.93536,4503.41376,s,10,5.738609375,0.5738609375000001,0.0014211968686511217,0.5739980163574219,0.575327734375,0.5758431579589843,0.5762554968261718,"[0.5748433227539063, 0.5763585815429687, 0.5723970336914063, 0.5719782104492187, 0.5729453125, 0.57201806640625, 0.5736748046875, 0.5743212280273438, 0.5752131958007812, 0.574859619140625]",tokens/s,446.1011079012499,kWh,6.756110654936897e-06,3.7020347229372414e-06,3.163201604633459e-05,4.209016142420872e-05,tokens/kWh,6082181.472764753,MB,2003.341312,5480.382464,0.0,4833.93536,4688.700416,s,10,334.8389140625,33.48389140625,0.006002777931550277,33.4837109375,33.491801171875004,33.4921896484375,33.4925004296875,"[33.49171484375, 33.492578125, 33.48669921875, 33.4832421875, 33.47827734375, 33.4841796875, 33.4782109375, 33.47846875, 33.4750390625, 33.49050390625]",tokens/s,1.8815017417073754,kWh,0.000395271455562777,0.00021664300892132815,0.0018250813026564506,0.0024369957671405555,tokens/kWh,25851.501610903877,,s,629,339.4681906738281,0.539695056715148,0.0682067154879699,0.5314457397460938,0.5320167358398438,0.5323411499023437,1.1045171826171876,"[0.5315440673828125, 0.5327882080078125, 0.5320939331054687, 0.531873779296875, 0.5308590087890624, 0.5313822631835937, 0.530882568359375, 0.5313515625, 0.530988037109375, 0.531061767578125, 0.530872314453125, 0.5314385986328125, 0.5312184448242188, 0.531462158203125, 0.531262451171875, 0.5317355346679687, 0.5311897583007813, 0.5321195678710937, 0.5312982788085937, 0.5320591430664062, 0.5310802001953125, 0.531641357421875, 0.5311631469726562, 0.5312388916015625, 0.5309296875, 0.5314119873046875, 0.5308661499023437, 0.53139453125, 0.5312368774414062, 0.5312471313476562, 0.5308395385742187, 0.5312962646484375, 0.5309542236328125, 0.531441650390625, 0.531462158203125, 0.532063232421875, 0.5323673706054688, 0.5327810668945312, 0.5322188720703125, 0.5317089233398438, 0.5315245971679687, 0.531757080078125, 0.5315543212890625, 0.5318953247070313, 0.5315266723632812, 0.5318656005859375, 
0.5314826049804687, 0.53178369140625, 0.5316423950195313, 0.5318696899414063, 0.5316649169921875, 0.53180517578125, 0.531388427734375, 0.5319229736328125, 0.5317713623046875, 0.5329234008789062, 0.5328568115234374, 0.5318778686523438, 0.531589111328125, 0.5319567260742187, 0.5317099609375, 0.5322915649414063, 1.1084400634765625, 0.53127783203125, 0.5318246459960938, 0.53123583984375, 0.5316290283203124, 0.531357666015625, 0.5316055297851563, 0.5315020751953125, 0.531830810546875, 0.5315850219726562, 0.532305908203125, 0.5320980224609375, 0.5330299072265625, 0.5324052734375, 0.5326130981445313, 0.53216357421875, 0.5326397705078125, 0.5320242919921875, 0.5325066528320312, 0.532401123046875, 0.5316433715820312, 0.5314949340820313, 0.5316167602539063, 0.5315225830078125, 0.5317222290039062, 0.5312593994140625, 0.5317662963867188, 0.5315317993164063, 0.5316321411132813, 0.5320345458984375, 0.5328107299804687, 0.5314774780273438, 0.531272705078125, 0.530861083984375, 0.53119384765625, 0.5307944946289063, 0.5313526000976563, 0.530988037109375, 0.5313013916015625, 0.530966552734375, 0.5314918212890625, 0.5308528442382813, 0.5315717163085938, 0.53110888671875, 0.5315563354492188, 0.530924560546875, 0.533138427734375, 0.531162109375, 0.5319669799804687, 0.5311057739257813, 0.5323171997070313, 0.53113037109375, 0.5316034545898437, 0.5314006958007812, 0.5314744262695312, 0.5310115966796874, 0.5314641723632813, 0.53139453125, 0.5315348510742187, 0.5311682739257813, 0.53148876953125, 0.5309757690429687, 0.5314242553710937, 1.1040880126953125, 0.5311743774414063, 0.5314990234375, 0.531357666015625, 0.53203662109375, 0.5313760986328125, 0.531758056640625, 0.5313689575195313, 0.53195263671875, 0.5313187866210938, 0.5315809326171875, 0.5311826171875, 0.5317130126953125, 0.5317140502929687, 0.5317130126953125, 0.5313382568359375, 0.5315819702148438, 0.53187890625, 0.5318358764648438, 0.531926025390625, 0.5318963012695312, 0.5314027709960938, 0.53145703125, 0.531620849609375, 0.5316331787109375, 0.5310167236328125, 0.5312348022460938, 0.5310382080078125, 0.5314088745117187, 0.5308682250976563, 0.5315399780273438, 0.5312174072265625, 0.5312706298828125, 0.5310443725585937, 0.5318563842773437, 0.531294189453125, 0.5317816162109374, 0.5314345092773437, 0.5318461303710937, 0.5316024169921875, 0.5316915283203125, 0.5309102172851563, 0.531857421875, 0.5316290283203124, 0.5315747680664062, 0.5310914306640625, 0.5317744750976563, 0.5314283447265625, 0.53144677734375, 0.5316792602539062, 0.5314396362304687, 0.531156982421875, 0.5315000610351562, 0.5313597412109375, 0.531751953125, 0.5312440185546875, 0.5317304077148437, 0.5317755126953125, 0.5315819702148438, 0.531620849609375, 0.5316690063476562, 0.5314795532226563, 0.531746826171875, 1.10517041015625, 0.5310320434570313, 0.5314959106445313, 0.531240966796875, 0.5316321411132813, 0.5310750732421875, 0.5315758056640625, 0.5309490966796875, 0.5315245971679687, 0.531431396484375, 0.5313065185546875, 0.5310504760742187, 0.53180517578125, 0.5314457397460938, 0.5317089233398438, 0.53155224609375, 0.5318389892578125, 0.53121435546875, 0.5315266723632812, 0.5316557006835938, 0.5317037963867187, 0.5315112915039063, 0.5319659423828125, 0.5315870971679687, 0.5313024291992188, 0.5310637817382813, 0.53197412109375, 0.5312553100585937, 0.5316925659179688, 0.5314713745117188, 0.53150927734375, 0.5313689575195313, 0.531694580078125, 0.5312471313476562, 0.5318174438476563, 0.5313341674804688, 0.5315020751953125, 0.5311109008789062, 0.5313853149414063, 0.5311262817382812, 
0.53142529296875, 0.531198974609375, 0.53186767578125, 0.5312348022460938, 0.5325199584960938, 0.5314805908203125, 0.5316055297851563, 0.531357666015625, 0.5316454467773437, 0.5315297241210938, 0.5319075927734375, 0.5314221801757812, 0.5315809326171875, 0.531282958984375, 0.531325927734375, 0.5312184448242188, 0.5315061645507813, 0.5310894165039063, 0.5316116333007812, 0.5311078491210938, 0.5315430297851562, 0.5313955688476563, 0.5316587524414063, 1.10468408203125, 0.5308170166015626, 0.5314140014648437, 0.5310218505859375, 0.53187890625, 0.5309481201171875, 0.5317294311523437, 0.5310084838867187, 0.5319915771484375, 0.530977783203125, 0.5312849731445313, 0.5309696044921876, 0.531294189453125, 0.5310863647460937, 0.5315205078125, 0.5311488037109375, 0.531873779296875, 0.5310392456054688, 0.5315072021484375, 0.531240966796875, 0.53151025390625, 0.531162109375, 0.531541015625, 0.5314877319335938, 0.5313935546875, 0.5315809326171875, 0.5316167602539063, 0.531219482421875, 0.53136279296875, 0.5309470825195313, 0.5313136596679687, 0.5309798583984375, 0.5316341552734375, 0.5309798583984375, 0.5316966552734375, 0.5313157348632812, 0.531535888671875, 0.5310628051757813, 0.5316270141601562, 0.5313607788085938, 0.5316013793945312, 0.5313177490234375, 0.5321758422851562, 0.53148876953125, 0.5317089233398438, 0.53140478515625, 0.5316423950195313, 0.5311181030273437, 0.5316239624023438, 0.5310228271484375, 0.5313597412109375, 0.5311314086914063, 0.5313966064453125, 0.5312317504882812, 0.531378173828125, 0.5308600463867188, 0.5313966064453125, 0.5326162109375, 0.5314959106445313, 0.5313351440429688, 0.5318082275390625, 0.5313423461914063, 0.5320233154296875, 1.1039805908203124, 0.530788330078125, 0.5315082397460937, 0.5307863159179688, 0.53119384765625, 0.5309398803710937, 0.5312583618164063, 0.5307330322265625, 0.5312890625, 0.5308733520507812, 0.5311170654296875, 0.5309942016601562, 0.5312973022460937, 0.5311661987304688, 0.5314498291015625, 0.5309091796875, 0.532263916015625, 0.531462158203125, 0.5319485473632812, 0.531884033203125, 0.531937255859375, 0.5315133666992188, 0.532031494140625, 0.5308630981445313, 0.5312614135742187, 0.5308876953125, 0.5313873901367188, 0.5314078979492187, 0.5316280517578125, 0.531314697265625, 0.53157373046875, 0.5310853271484375, 0.5314744262695312, 0.5311897583007813, 0.5319854125976563, 0.53146728515625, 0.5319290771484375, 0.5310658569335938, 0.533580810546875, 0.5315614624023437, 0.5317345581054688, 0.5312808837890625, 0.531472412109375, 0.531282958984375, 0.5319321899414062, 0.5314600830078124, 0.53201611328125, 0.5309706420898438, 0.5315635375976563, 0.53127783203125, 0.5316904907226563, 0.5316259765625, 0.5321615600585937, 0.5318072509765625, 0.5314979858398438, 0.5314017333984375, 0.5320017700195312, 0.5319782104492188, 0.5316690063476562, 0.5314088745117187, 0.5318072509765625, 0.5312388916015625, 0.531979248046875, 1.1056363525390625, 0.5310975952148438, 0.5316484985351563, 0.5309685668945312, 0.5316218872070313, 0.5310679321289062, 0.53184716796875, 0.5311968994140625, 0.5316198120117187, 0.5313331298828124, 0.53146728515625, 0.5315911865234375, 0.5316013793945312, 0.5314119873046875, 0.5317488403320313, 0.5311682739257813, 0.5313904418945312, 0.5310167236328125, 0.5313966064453125, 0.5309951782226563, 0.5314457397460938, 0.5311436767578125, 0.5312788696289062, 0.5310361328125, 0.5313966064453125, 0.5309634399414063, 0.531441650390625, 0.5314631958007813, 0.5321011352539062, 0.5310320434570313, 0.53186865234375, 0.5309337768554687, 0.5312890625, 
0.5309788208007813, 0.53139453125, 0.53129931640625, 0.5314539794921875, 0.5309747314453125, 0.5315932006835937, 0.5313382568359375, 0.5317478637695312, 0.5309747314453125, 0.53136279296875, 0.5310310668945313, 0.5313843383789062, 0.5313546142578125, 0.531884033203125, 0.5320816650390625, 0.5317007446289063, 0.5317181396484375, 0.5314928588867187, 0.531162109375, 0.53182568359375, 0.5310238647460938, 0.5315983276367188, 0.5310853271484375, 0.5314877319335938, 0.5318953247070313, 0.5316690063476562, 0.5312655639648437, 0.531473388671875, 0.5310628051757813, 0.5314242553710937, 1.1063428955078125, 0.5311477661132813, 0.5320335083007812, 0.5312921752929688, 0.5317335205078125, 0.530977783203125, 0.5313136596679687, 0.5312286987304687, 0.5314478149414062, 0.5310218505859375, 0.5314529418945313, 0.5309644775390625, 0.5314129638671875, 0.5308477172851562, 0.5313074951171874, 0.5311713256835937, 0.5313065185546875, 0.5310802001953125, 0.531578857421875, 0.5314457397460938, 0.5317069091796875, 0.53136279296875, 0.531314697265625, 0.531267578125, 0.5314457397460938, 0.5310955810546875, 0.5312737426757812, 0.53108837890625, 0.5317027587890625, 0.5308856201171875, 0.5313976440429687, 0.5313341674804688, 0.5313167114257813, 0.5309122314453125, 0.5314457397460938, 0.5309849853515625, 0.5314765014648437, 0.5310156860351563, 0.5319966430664063, 0.5313443603515625, 0.5320386352539063, 0.531367919921875, 0.5315552978515625, 0.530850830078125, 0.5315706787109375, 0.5309685668945312, 0.5314590454101562, 0.5309583129882812, 0.5313720092773437, 0.5309685668945312, 0.5319751586914062, 0.531578857421875, 0.5315164184570312, 0.5312655639648437, 0.5314447631835938, 0.5310504760742187, 0.5314877319335938, 0.5322485961914063, 0.5322516479492188, 0.531857421875, 0.5321328735351563, 0.5316331787109375, 0.531778564453125, 1.1072911376953125, 0.5309224853515625, 0.5317345581054688, 0.5315655517578125, 0.531462158203125, 0.53136279296875, 0.5313065185546875, 0.530951171875, 0.5313351440429688, 0.5309603881835937, 0.53127783203125, 0.5310310668945313, 0.5318154296875, 0.531683349609375, 0.5316904907226563, 0.5310494995117188, 0.5312767944335938, 0.5310556030273438, 0.5316259765625, 0.53110986328125, 0.5314088745117187, 0.5309767456054687, 0.5314846801757812, 0.5312081909179688, 0.5313607788085938, 0.5309849853515625, 0.53131982421875, 0.5310699462890625, 0.5314191284179688, 0.5309747314453125, 0.5317734375, 0.531103759765625, 0.5316280517578125, 0.5309747314453125, 0.531689453125, 0.531009521484375, 0.5314703369140625, 0.53123583984375, 0.53159423828125, 0.5309818725585937, 0.5314191284179688, 0.5310842895507812, 0.5318082275390625, 0.531125244140625, 0.5315932006835937, 0.531051513671875, 0.5314324340820312, 0.5311846313476563, 0.5314447631835938, 0.5312880859375, 0.5315921630859375, 0.5316597900390625, 0.5315440673828125, 0.53146826171875, 0.53159423828125, 0.5313331298828124, 0.5315133666992188, 0.5311201171875, 0.5314662475585937, 0.5311539306640625, 0.5316423950195313, 0.531114990234375, 0.5314866943359375, 1.1065169677734374, 0.5308661499023437, 0.5313402709960937, 0.5308170166015626, 0.5315850219726562, 0.5312532348632812, 0.5315512084960937, 0.53100341796875, 0.5315338134765625, 0.531156982421875, 0.5314345092773437, 0.5309490966796875, 0.5311846313476563, 0.5308651733398437, 0.5314539794921875, 0.5309276123046875, 0.531431396484375, 0.5310126342773438, 0.5312388916015625, 0.5309061279296875, 0.531294189453125, 0.5308026733398438, 0.531325927734375, 0.5314857177734374, 0.5316792602539062, 0.5311539306640625, 
0.5321492309570313, 0.5309573364257812, 0.5314765014648437, 0.5313320922851562, 0.5315184936523437, 0.5314457397460938, 0.5313597412109375, 0.530850830078125, 0.5313320922851562, 0.531251220703125, 0.5314334716796875, 0.5309030151367188, 0.531431396484375, 0.531162109375, 0.5314017333984375, 0.53098291015625, 0.5322833862304688, 0.532005859375, 0.5324338989257813, 0.53241650390625, 0.5326049194335938, 0.5322711181640625, 0.5329356689453125, 0.5318450927734375, 0.5316024169921875, 0.5314273071289063, 0.5318809814453125, 0.53153076171875, 0.5320017700195312, 0.5319137573242188, 0.5322250366210938, 0.5320192260742187, 0.5323571166992187, 0.5319556884765625, 0.53264794921875, 0.5321123657226563, 0.5325609130859374]",tokens/s,1.8528982016001712,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciLM-7B,,cuda,0,42,,,,,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,main,False,False,MB,1674.596352,5516.034048,0.0,4869.586944,4743.593472,s,10,6.137113647460938,0.6137113647460938,0.0032895717697753717,0.6129547424316406,0.6153997436523437,0.6190885131835938,0.6220395288085938,"[0.6227772827148438, 0.6144583129882812, 0.61261669921875, 0.61060546875, 0.612577392578125, 0.6105811157226563, 0.6121093139648438, 0.6145800170898438, 0.6132927856445313, 0.6135152587890625]",tokens/s,417.13420136176387,kWh,7.2172448039054885e-06,3.954746732982612e-06,3.355963469082604e-05,4.473162622771414e-05,tokens/kWh,5723020.189268045,MB,1674.596352,5516.034048,0.0,4869.586944,4769.651712,s,10,361.12618359375006,36.112618359375006,0.009999043540991457,36.112980468749996,36.125964453125,36.127720507812505,36.1291253515625,"[36.1294765625, 36.0964296875, 36.12557421875, 36.1174140625, 36.11404296875, 36.1114296875, 36.11191796875, 36.09769140625, 36.10714453125, 36.1150625]",tokens/s,1.7445425688343892,kWh,0.00042614284929302004,0.0002335616249413365,0.0019425026683799653,0.0026022071426143214,tokens/kWh,24210.217153084406,,s,629,366.0647033081055,0.5819788605852233,0.07282065333686359,0.5730969848632812,0.5741784912109374,0.5745491943359375,1.1854891552734375,"[0.5736980590820312, 0.573138916015625, 0.5735465087890625, 0.574129150390625, 0.5737092895507813, 0.57447216796875, 0.5745264892578125, 0.5731635131835937, 0.5737205810546875, 0.5735956420898437, 0.5749248046875, 0.5725880126953125, 0.5736376342773437, 0.57371337890625, 0.573749267578125, 0.5733990478515625, 0.5727344360351563, 0.57394482421875, 0.57497705078125, 0.5740851440429687, 0.5741229858398438, 0.5742008056640625, 0.5734738159179688, 0.574509033203125, 0.5738157958984375, 0.573897705078125, 0.5732402954101562, 0.5745704956054688, 0.5727620849609375, 0.5734061889648437, 0.5746319580078125, 0.5745643310546875, 0.5739735107421875, 0.5731901245117188, 0.5725511474609375, 0.5742970581054687, 0.5729157104492187, 0.572837890625, 0.57303857421875, 0.5725552368164063, 0.5740175170898437, 0.5737943115234375, 0.57314404296875, 0.5743441772460938, 0.5747333374023438, 0.5747230834960938, 0.5726239013671875, 0.5742520141601563, 
0.5729638671875, 0.5725716552734375, 0.572348388671875, 0.572031005859375, 0.5730672607421875, 0.5724190673828125, 0.572590087890625, 0.5723648071289062, 0.5724487915039063, 0.572526611328125, 0.5727549438476562, 0.57301708984375, 0.5722869873046875, 0.5721016235351563, 1.18763623046875, 0.5728788452148438, 0.5735679931640625, 0.5728031005859375, 0.5727078247070313, 0.5732781982421875, 0.5730969848632812, 0.5728460693359375, 0.57259521484375, 0.5722409057617187, 0.57280615234375, 0.57242724609375, 0.57225830078125, 0.5726760864257813, 0.572142578125, 0.5723013305664062, 0.5727958984375, 0.5730846557617187, 0.572316650390625, 0.5732341918945313, 0.57354443359375, 0.5724467163085938, 0.5723648071289062, 0.5720924072265625, 0.5729249267578125, 0.5723299560546875, 0.5724815063476563, 0.572590087890625, 0.5723525390625, 0.5726453857421875, 0.572458984375, 0.5725296630859374, 0.5720729370117188, 0.572706787109375, 0.5740257568359375, 0.572758056640625, 0.5724251708984375, 0.5737236328125, 0.57303857421875, 0.5752218017578125, 0.5730856323242187, 0.57284814453125, 0.5734194946289063, 0.5729658813476562, 0.5726095581054688, 0.5726730346679687, 0.5732310791015625, 0.5731287231445312, 0.5729985961914063, 0.572822509765625, 0.5728573608398437, 0.5731840209960938, 0.5744937133789062, 0.5729187622070312, 0.5740841064453125, 0.5739561157226563, 0.5734850463867187, 0.5729812622070313, 0.5727211303710937, 0.5733560180664062, 0.5732577514648437, 0.5737369384765625, 0.5736038208007812, 1.18655078125, 0.5729157104492187, 0.5731143798828126, 0.5728522338867188, 0.5734451293945313, 0.5739530029296875, 0.572958740234375, 0.572759033203125, 0.5748234252929687, 0.57375537109375, 0.573359130859375, 0.5733130493164063, 0.5738352661132813, 0.57318603515625, 0.5733314819335937, 0.5734747924804687, 0.5750180053710937, 0.5730693359375, 0.5736345825195313, 0.573718505859375, 0.57335498046875, 0.5729044189453125, 0.5738772583007813, 0.5738352661132813, 0.5730682983398437, 0.5731051635742187, 0.5734297485351563, 0.5730928344726562, 0.5731317749023438, 0.5730283813476562, 0.573849609375, 0.57297509765625, 0.5731461181640625, 0.5727354736328125, 0.5730130004882813, 0.5731461181640625, 0.5728123168945313, 0.5729003295898437, 0.5728778076171875, 0.5738741455078125, 0.5740830688476563, 0.572737548828125, 0.5726536254882812, 0.5742734985351563, 0.5730897827148438, 0.5729822998046875, 0.5732136840820312, 0.5756242065429688, 0.5729976196289063, 0.5738700561523438, 0.573106201171875, 0.573149169921875, 0.5729013671875, 0.573991943359375, 0.573201416015625, 0.5733877563476563, 0.5732608032226563, 0.5743902587890625, 0.5742141723632812, 0.5728829345703125, 0.5743646850585937, 0.5734010620117187, 0.573259765625, 1.1840142822265625, 0.5737728271484375, 0.5731215209960937, 0.5732106323242188, 0.5732505493164063, 0.572969970703125, 0.573033447265625, 0.57314306640625, 0.5731942138671875, 0.5728460693359375, 0.5744957275390625, 0.5742643432617187, 0.5740144653320313, 0.5731768798828125, 0.5741864624023437, 0.5734174194335937, 0.573322265625, 0.5733375854492188, 0.5739192504882813, 0.5734788818359375, 0.5737677001953125, 0.5734850463867187, 0.5734430541992187, 0.5732044677734375, 0.5726986083984374, 0.572821533203125, 0.5730499267578125, 0.5735310668945313, 0.574656494140625, 0.5733201904296875, 0.5725225219726563, 0.5739570922851562, 0.5737728271484375, 0.57303857421875, 0.5733232421875, 0.5734522705078124, 0.5738311767578125, 0.575151123046875, 0.5733201904296875, 0.5735167846679687, 0.5731133422851562, 0.5729003295898437, 
0.5728235473632812, 0.573318115234375, 0.5730816040039063, 0.5742418212890625, 0.573048828125, 0.5728880615234375, 0.572990478515625, 0.5729679565429687, 0.5725675659179688, 0.5726566162109376, 0.5728235473632812, 0.5745910034179688, 0.5722357788085938, 0.5722327270507812, 0.572821533203125, 0.5726668701171875, 0.5724385375976563, 0.5728890991210938, 0.5728051147460937, 0.5724405517578125, 0.5733775634765625, 1.1864791259765626, 0.5730631713867187, 0.5741793212890625, 0.574244873046875, 0.5729197998046875, 0.5730785522460937, 0.5742643432617187, 0.5736028442382812, 0.5729208374023438, 0.5731768188476563, 0.573612060546875, 0.5728307495117188, 0.5726996459960938, 0.57314306640625, 0.5741906127929688, 0.572669921875, 0.5738731689453125, 0.5743124389648437, 0.5728778076171875, 0.5729352416992187, 0.574119873046875, 0.5733079223632812, 0.5724866333007812, 0.572231689453125, 0.5732946166992188, 0.5724682006835937, 0.5730529174804687, 0.5733519287109375, 0.573106201171875, 0.5727999877929687, 0.57236376953125, 0.5724620971679687, 0.5725665893554688, 0.5737697143554688, 0.5733406372070312, 0.573254638671875, 0.5726945190429688, 0.5743114013671875, 0.5735679931640625, 0.5726617431640625, 0.5724610595703125, 0.57396533203125, 0.5732372436523437, 0.5741332397460938, 0.5733570556640625, 0.5733375854492188, 0.572717041015625, 0.572859375, 0.5731901245117188, 0.572558349609375, 0.5723750610351562, 0.5745213623046875, 0.573633544921875, 0.5729638671875, 0.573412353515625, 0.5735505981445312, 0.5726494750976563, 0.5753558959960937, 0.5728399658203125, 0.573432861328125, 0.5723391723632812, 0.5729290161132813, 0.572626953125, 1.1842232666015624, 0.5728092041015626, 0.572779541015625, 0.572632080078125, 0.5737963256835937, 0.5736775512695312, 0.5726033935546875, 0.572564453125, 0.5733416748046875, 0.5725654907226563, 0.5723832397460937, 0.5723812255859375, 0.5731624755859375, 0.5723832397460937, 0.5729197998046875, 0.572788818359375, 0.5723237915039062, 0.5723801879882813, 0.5726771850585938, 0.5727251586914063, 0.5726986083984374, 0.57333349609375, 0.5739939575195312, 0.5731481323242188, 0.5727344360351563, 0.5737195434570312, 0.5727918090820312, 0.5725234985351563, 0.5729740600585937, 0.5731522827148438, 0.5736734619140625, 0.5729310913085938, 0.5734471435546875, 0.573127685546875, 0.5729720458984375, 0.5730723876953125, 0.5730529174804687, 0.5730682983398437, 0.5760235595703125, 0.5742151489257813, 0.573750244140625, 0.5731143798828126, 0.573844482421875, 0.5748480224609375, 0.5746339721679687, 0.5739223022460938, 0.5732044677734375, 0.5739735107421875, 0.5730263061523437, 0.5728583984375, 0.5729924926757812, 0.5730785522460937, 0.5734502563476562, 0.5732157592773437, 0.573191162109375, 0.5728983154296875, 0.5738905639648437, 0.5733457641601563, 0.57269873046875, 0.572650390625, 0.5734747924804687, 0.5728338012695312, 0.5733191528320313, 1.1859814453125, 0.57253271484375, 0.5722654418945312, 0.5727242431640625, 0.5725839233398438, 0.5722265625, 0.572601318359375, 0.5729863891601562, 0.572706787109375, 0.5726239013671875, 0.5739888916015625, 0.5729249267578125, 0.573317138671875, 0.5733673095703125, 0.5743882446289063, 0.5732106323242188, 0.5734819946289063, 0.5727047729492187, 0.5730140380859375, 0.572675048828125, 0.5729136352539063, 0.5729924926757812, 0.5723709716796875, 0.5744793701171875, 0.57339599609375, 0.5728604125976563, 0.5722327270507812, 0.5737615356445313, 0.5753026733398438, 0.5738475341796875, 0.5734512939453125, 0.5752913818359375, 0.5733673095703125, 0.5729085693359375, 
0.5732177734375, 0.573886474609375, 0.5727651977539062, 0.57282763671875, 0.57284814453125, 0.5731399536132813, 0.5727467651367187, 0.5733314819335937, 0.5736110229492187, 0.5734830322265625, 0.5729003295898437, 0.574867431640625, 0.5740697631835937, 0.572969970703125, 0.5728942260742188, 0.5735751342773437, 0.5731563720703124, 0.5731113891601562, 0.5729085083007812, 0.5745828247070313, 0.5727620849609375, 0.5729269409179687, 0.5724876708984376, 0.573665283203125, 0.5732260131835938, 0.5726268920898437, 0.57246923828125, 0.572621826171875, 0.5729013671875, 1.1888896484375, 0.57364990234375, 0.5738916015625, 0.5726515502929688, 0.5726239013671875, 0.5722838745117188, 0.5730723876953125, 0.573032470703125, 0.5727662353515625, 0.5732413330078125, 0.5728880615234375, 0.5727396240234375, 0.5731368408203125, 0.573233154296875, 0.5731963500976562, 0.5735249633789062, 0.5744219970703125, 0.5727999877929687, 0.5726064453125, 0.5729290771484375, 0.5737103271484375, 0.572811279296875, 0.5732679443359375, 0.5726730346679687, 0.5734287109375, 0.5722675170898438, 0.5725828857421875, 0.5725849609375, 0.5723627319335938, 0.5727416381835938, 0.5728348388671874, 0.5723678588867187, 0.5728604125976563, 0.573675537109375, 0.57333349609375, 0.5725634765625, 0.5726546020507812, 0.5731399536132813, 0.5728031005859375, 0.5724630737304688, 0.57310205078125, 0.5734573974609375, 0.5732689819335938, 0.5729290161132813, 0.5729782104492187, 0.5726064453125, 0.572527587890625, 0.5728818969726562, 0.5733007202148438, 0.5730426635742187, 0.5736365966796875, 0.5730549926757813, 0.573111328125, 0.5725552368164063, 0.5726505126953125, 0.5732802734375, 0.5726494750976563, 0.5724876708984376, 0.5733058471679687, 0.5727211303710937, 0.5725931396484375, 0.5726648559570312, 0.5728655395507812, 1.1863818359375, 0.5727938842773438, 0.5733508911132813, 0.5740676879882812, 0.574234619140625, 0.5727047729492187, 0.5733447875976563, 0.5732515869140625, 0.5743012084960938, 0.5728123168945313, 0.5730969848632812, 0.5729976196289063, 0.5739694213867188, 0.5728798828125, 0.5730140380859375, 0.5724334106445312, 0.5727744140625, 0.5740390625, 0.5736099853515625, 0.5726300048828125, 0.572969970703125, 0.5736683349609375, 0.572948486328125, 0.5725542602539062, 0.5725962524414062, 0.5727139892578125, 0.573559814453125, 0.572568603515625, 0.5725419311523438, 0.5730426635742187, 0.5727252197265625, 0.5726781616210938, 0.5734676513671875, 0.573179931640625, 0.5726351318359375, 0.573233154296875, 0.5737062377929687, 0.5729423217773437, 0.5729710083007813, 0.5735885009765626, 0.5732741088867187, 0.5730344848632812, 0.5727262573242188, 0.5732567138671875, 0.5732689819335938, 0.5727232055664062, 0.5727559814453125, 0.5730703125, 0.5724334716796875, 0.5731378784179687, 0.5730191650390625, 0.572896240234375, 0.5732177734375, 0.5728727416992188, 0.5729361572265625, 0.5730979614257813, 0.574118896484375, 0.573497314453125, 0.5732976684570312, 0.5737840576171875, 0.5730130004882813, 0.5730549926757813, 0.572958740234375, 1.187092529296875, 0.5724129028320313, 0.5726473999023437, 0.57280615234375, 0.572416015625, 0.5723361206054688, 0.572821533203125, 0.5732761840820313, 0.5727733764648437, 0.573095947265625, 0.5734052124023438, 0.573053955078125, 0.572674072265625, 0.5729341430664062, 0.5734830322265625, 0.573849609375, 0.5724999389648437, 0.5727334594726563, 0.5741107177734375, 0.5736663208007813, 0.57402978515625, 0.5733611450195313, 0.572958740234375, 0.5730099487304687, 0.5733345336914063, 0.5739038696289063, 0.575910888671875, 0.574392333984375, 
0.5739898681640625, 0.5732730712890625, 0.5733109741210938, 0.5730928344726562, 0.5741782836914062, 0.5731993408203125, 0.57302734375, 0.5732771606445313, 0.572506103515625, 0.5729556274414063, 0.5733088989257813, 0.572968994140625, 0.5736283569335937, 0.5738291015625, 0.5733499145507812, 0.5729085693359375, 0.573539306640625, 0.5743226928710937, 0.5736959838867187, 0.5733365478515625, 0.57354443359375, 0.5733949584960938, 0.5731727294921874, 0.5731215209960937, 0.5734061889648437, 0.5725133056640626, 0.5726607055664062, 0.5729464111328125, 0.5725101928710937, 0.57212109375, 0.5734563598632813, 0.5726597290039063, 0.5727313842773437, 0.572416015625, 0.5735731201171875]",tokens/s,1.7182754696526694,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-0.5B,Qwen/Qwen1.5-0.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1460.260864,1709.703168,0.0,1063.256064,942.605312,s,10,0.8792122802734375,0.08792122802734374,0.0017793873831683257,0.08834756851196289,0.08902708129882812,0.09042998733520508,0.09155231216430663,"[0.09183289337158203, 0.08871532440185546, 0.08646198272705079, 0.08839430236816406, 0.08854621124267578, 0.08836022186279296, 0.08833491516113282, 0.08542150115966797, 0.08774205017089844, 0.08540287780761718]",tokens/s,2911.6972742962976,kWh,1.0152025072210082e-06,5.56282725791299e-07,2.6184398915211728e-06,4.1899251245334795e-06,tokens/kWh,61098943.869194776,MB,1460.588544,1709.703168,0.0,1063.256064,942.607872,s,10,54.0859736328125,5.40859736328125,0.07705683066534309,5.45104150390625,5.477568017578125,5.481690747070313,5.484988930664063,"[5.4858134765625, 5.457708984375, 5.47665185546875, 5.46595458984375, 5.472109375, 5.4443740234375, 5.39297412109375, 5.29275341796875, 5.29816357421875, 5.29947021484375]",tokens/s,11.648121641981426,kWh,6.466018401433034e-05,3.543631727276219e-05,0.00015379821301947998,0.0002538947143065725,tokens/kWh,248134.35038245353,,s,629,54.78888442230225,0.08710474470954252,0.010581359575427846,0.08675225830078125,0.08761917724609375,0.0884242431640625,0.17260260864257815,"[0.08388607788085937, 0.08398233795166016, 0.08383897399902343, 0.08367718505859376, 0.08453632354736328, 0.0890890884399414, 0.08862201690673828, 0.08834764862060547, 0.08846745300292969, 0.08839679718017578, 0.08905522918701173, 0.08837939453125, 0.08853196716308594, 0.08820326232910156, 0.08852684783935547, 0.08853094482421875, 0.08833126068115234, 0.0883599395751953, 0.08833229064941406, 0.08829747009277343, 0.08771071624755859, 0.08838553619384766, 0.08845721435546874, 0.08875212860107422, 0.08825856018066407, 0.08850534057617188, 0.08816230773925782, 0.08822886657714844, 0.08858009338378907, 0.0882339859008789, 0.08867430114746094, 0.08864665222167968, 0.08844902038574219, 0.08828108978271484, 0.0884295654296875, 0.08401407623291016, 0.08382259368896484, 0.0838635482788086, 0.08400077056884765, 0.08771788787841797, 0.0872069091796875, 0.08849203491210937, 0.08865280151367187, 0.08827187347412109, 0.08736563110351563, 0.08729804992675781, 
0.08718950653076171, 0.08671231842041016, 0.08691506958007812, 0.08681779479980468, 0.0866529312133789, 0.086940673828125, 0.08705535888671875, 0.08424960327148437, 0.08400383758544921, 0.08397414398193359, 0.08412364959716796, 0.08525926208496094, 0.08683622741699219, 0.08680038452148438, 0.08671949005126953, 0.08692940521240235, 0.1763768310546875, 0.08659661102294922, 0.08731238555908204, 0.0868884506225586, 0.08696832275390624, 0.08851148986816407, 0.08714854431152344, 0.08598323059082032, 0.08636518096923829, 0.08685465240478515, 0.08704512023925781, 0.08686284637451172, 0.08745779418945313, 0.08453529357910156, 0.0869775390625, 0.08696627044677735, 0.08664575958251954, 0.0876072998046875, 0.08689766693115235, 0.08705535888671875, 0.08679936218261719, 0.0866550064086914, 0.08538006591796875, 0.08647270202636718, 0.08609996795654297, 0.08713215637207031, 0.08740147399902344, 0.08702365112304687, 0.08689250946044921, 0.08694169616699218, 0.08711577606201172, 0.08716902160644531, 0.08723149108886719, 0.0867747802734375, 0.08696012878417969, 0.08699494171142579, 0.0842239990234375, 0.08377855682373046, 0.08404684448242188, 0.08385945892333985, 0.08593510437011719, 0.08742301177978516, 0.08683209228515625, 0.08660889434814453, 0.0867952651977539, 0.08660377502441406, 0.08784178924560547, 0.08544153594970703, 0.08774246215820312, 0.08707891082763672, 0.08705331420898438, 0.08723865509033203, 0.08705843353271485, 0.08711577606201172, 0.08695193481445312, 0.08490188598632813, 0.08705741119384766, 0.08451789093017578, 0.08685260772705078, 0.08724787139892579, 0.08672563171386719, 0.08705638122558594, 0.08634572601318359, 0.1737533416748047, 0.08698675537109375, 0.08697344207763671, 0.08704000091552734, 0.08702982330322266, 0.08781715393066407, 0.08694579315185547, 0.08745369720458984, 0.08681785583496093, 0.08706249237060547, 0.08565961456298828, 0.0864000015258789, 0.0841502685546875, 0.08501248168945312, 0.08692018890380859, 0.08674201965332032, 0.0869222412109375, 0.08693862152099609, 0.0867041244506836, 0.08703385925292968, 0.08675225830078125, 0.086761474609375, 0.08619315338134766, 0.08686489868164063, 0.08680652618408204, 0.08746598052978516, 0.08694783782958984, 0.08730828857421875, 0.08752537536621094, 0.08693350219726563, 0.08705023956298828, 0.08706867218017578, 0.08747212982177735, 0.0880005111694336, 0.08732262420654296, 0.08604876708984376, 0.08707180786132812, 0.08670201873779297, 0.0873164825439453, 0.08682189178466797, 0.08709529876708984, 0.08716492462158203, 0.08606719970703125, 0.08672358703613281, 0.08690790557861328, 0.0867962875366211, 0.08702361297607422, 0.08684031677246094, 0.08695507049560547, 0.08671123504638673, 0.08706867218017578, 0.08863744354248047, 0.08740863800048829, 0.08729503631591796, 0.08682080078125, 0.08690380859375, 0.08818892669677734, 0.08729702758789062, 0.08700415802001953, 0.08678809356689453, 0.0868106231689453, 0.08693657684326171, 0.08690995025634765, 0.17623654174804687, 0.08711167907714844, 0.08703897857666015, 0.08722227478027343, 0.08726322937011718, 0.08703590393066406, 0.08675942230224609, 0.08699088287353515, 0.08680445098876953, 0.08698777770996094, 0.08604876708984376, 0.08687308502197266, 0.08715878295898437, 0.08707379150390625, 0.087108642578125, 0.08752022552490234, 0.08703385925292968, 0.08690995025634765, 0.0869939193725586, 0.08686182403564453, 0.08672665405273437, 0.08708710479736329, 0.086866943359375, 0.08725094604492188, 0.08706047821044922, 0.08675945281982422, 0.08477897644042968, 0.08724479675292969, 0.08646656036376953, 
0.086866943359375, 0.08677273559570313, 0.08715058898925782, 0.08593714904785156, 0.08755712127685547, 0.08769945526123046, 0.08737586975097657, 0.0873512954711914, 0.08723763275146484, 0.08681779479980468, 0.08708710479736329, 0.08671129608154297, 0.08673894500732422, 0.08689766693115235, 0.0878828125, 0.08707987213134766, 0.08682710266113282, 0.08714435577392578, 0.08673996734619141, 0.08677581024169922, 0.08577126312255859, 0.08668160247802735, 0.08681574249267578, 0.0868136978149414, 0.08693555450439452, 0.08668364715576173, 0.0859504623413086, 0.08378470611572265, 0.08481996917724609, 0.08730931091308594, 0.08659455871582031, 0.08389631652832032, 0.08560025787353516, 0.08680038452148438, 0.17275801086425782, 0.08369971466064453, 0.08384204864501953, 0.08377139282226563, 0.08545689392089843, 0.08574976348876953, 0.08727859497070313, 0.08688025665283203, 0.0867799072265625, 0.08705741119384766, 0.08711580657958984, 0.08679933166503906, 0.0867962875366211, 0.08704307556152344, 0.08686386871337891, 0.08696320343017579, 0.08698880004882813, 0.08742403411865235, 0.0870973129272461, 0.08734207916259766, 0.08837529754638672, 0.08715980529785156, 0.08705023956298828, 0.086793212890625, 0.0869744644165039, 0.08696217346191407, 0.08680550384521485, 0.08711167907714844, 0.08749362945556641, 0.08701235198974609, 0.08706150054931641, 0.08685772705078125, 0.08689766693115235, 0.08696115112304688, 0.0865955810546875, 0.08719564819335937, 0.08694989013671875, 0.08730931091308594, 0.08871324920654297, 0.08721711730957031, 0.08766668701171874, 0.08723865509033203, 0.08547942352294922, 0.088416259765625, 0.08738614654541016, 0.08706454467773438, 0.08716083526611328, 0.08708815765380859, 0.08697238159179688, 0.08681676483154296, 0.08706559753417968, 0.0872499237060547, 0.08688639831542969, 0.08803123474121094, 0.0873512954711914, 0.08714649963378907, 0.08679936218261719, 0.08706047821044922, 0.08689254760742188, 0.08732466888427734, 0.0865843505859375, 0.08680239868164062, 0.08697548675537109, 0.1747763214111328, 0.08670310211181641, 0.08710451507568359, 0.08699903869628907, 0.08693657684326171, 0.08607334136962891, 0.0869969940185547, 0.08681574249267578, 0.08696627044677735, 0.08683827209472657, 0.08736153411865234, 0.08679219055175781, 0.08696012878417969, 0.086830078125, 0.0871352310180664, 0.08736870574951172, 0.08679424285888672, 0.08719462585449218, 0.08674508666992188, 0.08693862152099609, 0.08799231719970703, 0.08773529815673828, 0.08741171264648437, 0.08712397003173829, 0.08704512023925781, 0.08727654266357422, 0.08699903869628907, 0.08696729278564454, 0.0868853759765625, 0.08697344207763671, 0.08712499237060548, 0.08712499237060548, 0.0865771484375, 0.08749158477783203, 0.08688127899169922, 0.08696524810791016, 0.08687923431396484, 0.08711475372314453, 0.08697856140136719, 0.08686489868164063, 0.08696832275390624, 0.0843724822998047, 0.08361881256103515, 0.08333516693115234, 0.08410316467285156, 0.08653314971923828, 0.0870778579711914, 0.08714342498779297, 0.08421171569824219, 0.08394239807128906, 0.08393215942382813, 0.08404377746582031, 0.08362393951416015, 0.08348060607910156, 0.08413180541992188, 0.08405094146728516, 0.08691302490234375, 0.08714854431152344, 0.08712191772460938, 0.08708812713623047, 0.08714649963378907, 0.08697241973876953, 0.08708812713623047, 0.175857666015625, 0.08723865509033203, 0.08733695983886719, 0.08694886779785156, 0.08750080108642579, 0.08700927734375, 0.0869959716796875, 0.08687718200683593, 0.08966963195800781, 0.0872069091796875, 0.08702873229980469, 
0.0869713897705078, 0.08688333129882812, 0.08693247985839844, 0.08705228424072266, 0.08724275207519532, 0.0872847671508789, 0.08704819488525391, 0.08684848022460938, 0.08682803344726563, 0.08715570831298829, 0.08688742065429687, 0.08504934692382812, 0.08777011108398437, 0.08640819549560547, 0.08897232055664063, 0.08616751861572265, 0.0871905288696289, 0.0890224609375, 0.0864686050415039, 0.08654438018798828, 0.08633036804199219, 0.08477593231201172, 0.08412876892089843, 0.08437452697753907, 0.08408985900878906, 0.08387686157226562, 0.08403353881835937, 0.08393318176269532, 0.08396185302734376, 0.08435302734375, 0.08403558349609375, 0.08378470611572265, 0.0841338882446289, 0.08378470611572265, 0.08367820739746094, 0.08367922973632813, 0.0835563507080078, 0.08376831817626954, 0.08383078765869141, 0.08380723571777343, 0.0837734375, 0.08563814544677735, 0.08487833404541016, 0.08407552337646484, 0.0837580795288086, 0.0836485137939453, 0.08376012420654297, 0.08358809661865234, 0.08362290954589843, 0.08371814727783203, 0.08357478332519531, 0.08497869110107421, 0.1696030731201172, 0.08372121429443359, 0.08371916961669922, 0.0837918701171875, 0.08386457824707032, 0.08376319885253906, 0.08395263671875, 0.08374784088134765, 0.08342527770996094, 0.08687206268310547, 0.08412262725830078, 0.08380416107177735, 0.08415436553955079, 0.0838440933227539, 0.08380316925048828, 0.08431715393066407, 0.08392499542236329, 0.08376319885253906, 0.08381747436523437, 0.08540467071533203, 0.08532991790771484, 0.08402022552490235, 0.084421630859375, 0.0841195526123047, 0.08383897399902343, 0.08380623626708984, 0.08381231689453125, 0.08393830108642578, 0.08379705810546875, 0.08394233703613281, 0.08370175933837891, 0.08405811309814454, 0.08380210876464844, 0.0837734375, 0.08362290954589843, 0.08380006408691407, 0.08357478332519531, 0.08359935760498047, 0.08378470611572265, 0.0834549789428711, 0.08362188720703125, 0.08369152069091797, 0.08387686157226562, 0.08376627349853516, 0.08377961730957031, 0.0838430404663086, 0.08387686157226562, 0.08553778839111328, 0.0837232666015625, 0.08371916961669922, 0.08374476623535156, 0.08370073699951172, 0.08382259368896484, 0.08384921264648437, 0.08401203155517578, 0.08357990264892579, 0.08365055847167968, 0.08378575897216797, 0.08367919921875, 0.08369055938720703, 0.0851127700805664, 0.08426700592041016, 0.08618905639648437, 0.1722030029296875, 0.08384819030761718, 0.08397721862792969, 0.0837570571899414, 0.08369664001464844, 0.0834549789428711, 0.08403865814208984, 0.08388505554199219, 0.08392908477783204, 0.08388198089599609, 0.08392704010009766, 0.08387379455566406, 0.0837027816772461, 0.08390860748291015, 0.08369254302978515, 0.08369766235351563, 0.08587161254882812, 0.0840478744506836, 0.08396288299560548, 0.08394649505615234, 0.08418099212646485, 0.08524288177490234, 0.08649318695068359, 0.0840816650390625, 0.08387174224853515, 0.08375603485107422, 0.08405811309814454, 0.08390758514404296, 0.0839393310546875, 0.08409497833251953, 0.08389836883544922, 0.08448000335693359, 0.08434893035888671, 0.08451583862304687, 0.0842639389038086, 0.08398745727539063, 0.08408370971679688, 0.08378880310058594, 0.08385740661621094, 0.08418406677246094, 0.08394035339355468, 0.08402124786376954, 0.08522137451171875, 0.08547840118408204, 0.08503705596923829, 0.08401510620117188, 0.08397516632080078, 0.08380723571777343, 0.08466022491455077, 0.0840079345703125, 0.08379193878173828, 0.08372525024414063, 0.08389631652832032, 0.0839393310546875, 0.08357071685791016, 0.08390652465820313, 
0.08391577911376953, 0.0837550048828125, 0.08391474914550781, 0.08372633361816406, 0.08374476623535156, 0.08390758514404296, 0.0837550048828125, 0.1732474822998047, 0.08425472259521484, 0.0840273895263672, 0.08414924621582032, 0.08400179290771484, 0.08393830108642578, 0.08408268737792969, 0.08393727874755859, 0.08398745727539063, 0.08395468902587891, 0.08501862335205078, 0.08602214050292968, 0.08572313690185547, 0.08382463836669922, 0.08390962982177734, 0.08375296020507812, 0.08380928039550781, 0.0840263671875, 0.08383385467529297, 0.08391986846923828, 0.08388505554199219, 0.083666015625, 0.08356137847900391, 0.08396697235107421, 0.08387481689453125, 0.08371916961669922, 0.08389119720458985, 0.08397926330566406, 0.0838656005859375, 0.08394751739501953, 0.08407244873046875, 0.0837232666015625, 0.08388813018798828, 0.08374578857421874, 0.08396185302734376, 0.08385330963134766, 0.08368434906005859, 0.08367206573486329, 0.08419328308105468, 0.08365875244140625, 0.08380825805664062, 0.08445235443115234, 0.0841533432006836, 0.08358707427978515, 0.0837027816772461, 0.08361472320556641, 0.08509645080566407, 0.08371302032470704, 0.08371097564697266, 0.08373248291015625, 0.08388198089599609, 0.08400691223144531, 0.08375910186767578, 0.08609996795654297, 0.08662732696533203, 0.08376422119140625, 0.08392396545410157, 0.08380006408691407, 0.08389119720458985, 0.0837754898071289, 0.0837734375, 0.0838175048828125, 0.08397615814208985]",tokens/s,11.480430869002337,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,0,0,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/0/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( 
File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a33e4-491bf23e0562996f7fa9d3c8;d8a555a4-2e5e-422e-9183-9adf3fd4ea8d) Repository Not Found for url: https://huggingface.co/0/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 0 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-70b-hf,meta-llama/Llama-2-70b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,2,2,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/2/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a328b-47de0c457aa01eb86b69c4a8;916d2eb3-4c07-4ecf-a307-d7323f759a35) Repository Not Found for url: https://huggingface.co/2/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 2 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-2.7b,facebook/opt-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2119.368704,2816.999424,0.0,2170.55232,1927.744512,s,10,2.4524682617187503,0.245246826171875,0.001090999616645648,0.2451712341308594,0.24625067138671874,0.24669122314453126,0.24704366455078125,"[0.24615277099609376, 0.24713177490234375, 0.24369766235351562, 0.24442691040039063, 0.244733154296875, 0.243721435546875, 0.24478985595703126, 0.2455526123046875, 0.24614064025878907, 
0.24612144470214845]",tokens/s,1043.8463322684916,kWh,2.8755634550064334e-06,1.5756288392757208e-06,1.2476319504855795e-05,1.6927511799137947e-05,tokens/kWh,15123309.499807118,MB,2119.368704,2816.999424,0.0,2170.55232,2031.983104,s,10,142.79050195312502,14.279050195312502,0.0031195370707275648,14.27847802734375,14.28149794921875,14.284002392578124,14.286005947265625,"[14.2787724609375, 14.2797041015625, 14.28094140625, 14.27818359375, 14.2772841796875, 14.2808662109375, 14.2747421875, 14.275935546875, 14.2775654296875, 14.2865068359375]",tokens/s,4.412058164812777,kWh,0.00016860610004691852,9.240989681349414e-05,0.0007205406280193562,0.000981556624879769,tokens/kWh,64183.76525930624,,s,629,144.7668543243408,0.23015398143774374,0.029166744118704197,0.22656614685058593,0.22702059936523436,0.22731673583984374,0.47147623168945313,"[0.228210693359375, 0.2264954833984375, 0.2263234558105469, 0.2265917510986328, 0.22652210998535155, 0.2266122283935547, 0.22653952026367188, 0.22631117248535157, 0.22642892456054686, 0.22647091674804687, 0.22641664123535157, 0.22648934936523438, 0.2270392303466797, 0.22645555114746094, 0.22661325073242186, 0.22698086547851562, 0.22665011596679688, 0.22659788513183593, 0.2262640686035156, 0.22631219482421874, 0.22640025329589844, 0.22692658996582032, 0.22699212646484376, 0.22696038818359374, 0.22650674438476562, 0.22637158203125, 0.2266552276611328, 0.22633882141113282, 0.22646173095703126, 0.2265814666748047, 0.22644224548339845, 0.22677912902832031, 0.22628044128417968, 0.22633573913574218, 0.2265681915283203, 0.226693115234375, 0.22634597778320312, 0.2265016326904297, 0.2263531494140625, 0.22655078125, 0.22668800354003907, 0.2267904052734375, 0.22690406799316407, 0.2266306610107422, 0.2264575958251953, 0.22657638549804687, 0.22662553405761718, 0.22665113830566405, 0.22651187133789064, 0.22652723693847657, 0.22627635192871093, 0.22631219482421874, 0.22654464721679687, 0.22648934936523438, 0.2266378173828125, 0.22687333679199218, 0.22802943420410157, 0.2267176971435547, 0.2265917510986328, 0.22732595825195312, 0.2264842224121094, 0.22659481811523438, 0.47421746826171873, 0.22658047485351562, 0.22655282592773437, 0.22653030395507812, 0.22670950317382813, 0.22652517700195313, 0.22650469970703124, 0.22647705078125, 0.22634803771972656, 0.2266787872314453, 0.22657125854492188, 0.22652621459960937, 0.22686515808105467, 0.2263900146484375, 0.22639718627929686, 0.22662553405761718, 0.22654975891113283, 0.22652928161621094, 0.2263838653564453, 0.22636647033691407, 0.2263582763671875, 0.22654975891113283, 0.22638899230957032, 0.2263961639404297, 0.22621388244628907, 0.22636749267578124, 0.22646170043945313, 0.22642381286621094, 0.22654156494140626, 0.2262988739013672, 0.22632652282714844, 0.2261411895751953, 0.2261012420654297, 0.22648838806152344, 0.22639097595214844, 0.2262917175292969, 0.22649856567382812, 0.2264145965576172, 0.22670130920410156, 0.2264453125, 0.227240966796875, 0.22686003112792968, 0.22684364318847655, 0.22670335388183593, 0.2264524841308594, 0.2268170166015625, 0.22824140930175782, 0.2270392303466797, 0.22767205810546876, 0.22691123962402343, 0.22711602783203125, 0.22689286804199219, 0.22666029357910156, 0.2267361297607422, 0.2266439666748047, 0.22683135986328126, 0.22709965515136718, 0.22691226196289063, 0.22731365966796874, 0.22672998046875, 0.22711911010742186, 0.22663475036621095, 0.22664601135253906, 0.47127346801757813, 0.2265681915283203, 0.226840576171875, 0.22668389892578125, 0.22649650573730468, 0.22704742431640626, 0.22672998046875, 
0.22683135986328126, 0.22644326782226562, 0.2265743408203125, 0.226555908203125, 0.22665933227539062, 0.22649754333496094, 0.22686720275878905, 0.22685696411132814, 0.2270627899169922, 0.22701158142089845, 0.22654360961914063, 0.227631103515625, 0.22625177001953126, 0.2269306945800781, 0.226946044921875, 0.2268231658935547, 0.2272542724609375, 0.2273054656982422, 0.22650982666015626, 0.22637158203125, 0.22674432373046874, 0.22774681091308593, 0.22656512451171876, 0.22653132629394532, 0.22656101989746094, 0.22630706787109375, 0.22627430725097655, 0.22650367736816407, 0.2263654327392578, 0.2271293487548828, 0.2264596405029297, 0.2263173065185547, 0.2265753631591797, 0.22677912902832031, 0.22655282592773437, 0.22645350646972656, 0.2264954833984375, 0.22634597778320312, 0.22660403442382812, 0.226482177734375, 0.22736895751953126, 0.22658149719238282, 0.22649037170410155, 0.22647296142578124, 0.2265016326904297, 0.22649650573730468, 0.22651187133789064, 0.22654464721679687, 0.22644224548339845, 0.22651084899902343, 0.22658662414550781, 0.22652210998535155, 0.22644940185546875, 0.2268712921142578, 0.226334716796875, 0.22724812316894533, 0.4715550842285156, 0.22658047485351562, 0.22659788513183593, 0.22659075927734376, 0.2265425567626953, 0.22664601135253906, 0.22673306274414062, 0.22659481811523438, 0.2266234893798828, 0.22643096923828124, 0.2263951416015625, 0.22653439331054687, 0.22656716918945313, 0.22648626708984376, 0.2264842224121094, 0.2265323486328125, 0.22739251708984376, 0.22662757873535155, 0.22650778198242189, 0.22635621643066406, 0.22640025329589844, 0.22646783447265625, 0.2263951416015625, 0.2265374755859375, 0.22659071350097656, 0.22637158203125, 0.22640229797363282, 0.22639820861816407, 0.2262640686035156, 0.22646578979492188, 0.22665728759765624, 0.22639411926269531, 0.2265692138671875, 0.22644940185546875, 0.22637055969238282, 0.22653132629394532, 0.22651084899902343, 0.22662042236328125, 0.226440185546875, 0.22653952026367188, 0.2264248352050781, 0.22653132629394532, 0.22682112121582032, 0.22637464904785157, 0.22807859802246094, 0.22658047485351562, 0.22653543090820313, 0.22662144470214843, 0.2265364532470703, 0.22637158203125, 0.2266941375732422, 0.2265016326904297, 0.22764134216308593, 0.2268037109375, 0.22737408447265625, 0.2267689666748047, 0.22669818115234375, 0.22641868591308595, 0.2270064697265625, 0.226808837890625, 0.22760447692871094, 0.2266941375732422, 0.22663475036621095, 0.4716912536621094, 0.226555908203125, 0.22654669189453125, 0.22630400085449218, 0.22651904296875, 0.22646885681152343, 0.2263726043701172, 0.2264627227783203, 0.226376708984375, 0.22640956115722657, 0.22651280212402344, 0.2264944610595703, 0.22653030395507812, 0.22637055969238282, 0.22640538024902343, 0.22671359252929688, 0.22652825927734374, 0.22724607849121095, 0.2267904052734375, 0.22637464904785157, 0.2265006103515625, 0.2266173400878906, 0.2275594177246094, 0.22663168334960937, 0.22695526123046875, 0.22644326782226562, 0.2264453125, 0.22670541381835937, 0.22647193908691407, 0.2264842224121094, 0.22652928161621094, 0.22634495544433594, 0.22692658996582032, 0.2264575958251953, 0.22655078125, 0.22647705078125, 0.22659584045410155, 0.22641151428222656, 0.22638490295410157, 0.2264320068359375, 0.22686924743652342, 0.22668185424804688, 0.226555908203125, 0.22660096740722657, 0.2265364532470703, 0.22648013305664064, 0.22652006530761717, 0.22684774780273437, 0.22667674255371092, 0.2264524841308594, 0.2264145965576172, 0.22648320007324219, 0.2265518035888672, 0.22661631774902344, 
0.22665216064453125, 0.22664909362792968, 0.22723379516601563, 0.22827008056640624, 0.22659890747070313, 0.22667776489257813, 0.22666752624511718, 0.22652517700195313, 0.22685696411132814, 0.47040716552734374, 0.2265364532470703, 0.22670335388183593, 0.22675152587890626, 0.22674327087402343, 0.22674124145507812, 0.22666854858398439, 0.2265016326904297, 0.22641664123535157, 0.226408447265625, 0.22644837951660157, 0.22657331848144532, 0.22669926452636718, 0.22673817443847658, 0.22666648864746095, 0.22652006530761717, 0.22712115478515624, 0.2267709503173828, 0.22682829284667969, 0.2264514617919922, 0.22717543029785156, 0.22672691345214843, 0.22699417114257814, 0.22659379577636718, 0.22670541381835937, 0.22654464721679687, 0.22634701538085938, 0.22648320007324219, 0.226376708984375, 0.22674330139160156, 0.2265518035888672, 0.22656716918945313, 0.22633882141113282, 0.22662144470214843, 0.22681292724609375, 0.22643609619140626, 0.22689791870117187, 0.22637055969238282, 0.22802841186523437, 0.22646885681152343, 0.22660301208496095, 0.22652006530761717, 0.22687744140625, 0.226482177734375, 0.22646476745605468, 0.22653132629394532, 0.2266787872314453, 0.22718771362304688, 0.22725325012207032, 0.22660914611816407, 0.22652517700195313, 0.2265364532470703, 0.2266623992919922, 0.2268590087890625, 0.2265999298095703, 0.2266439666748047, 0.22733311462402345, 0.2267484130859375, 0.22656101989746094, 0.22658047485351562, 0.22690713500976561, 0.22647296142578124, 0.2265333709716797, 0.4723138427734375, 0.22668698120117187, 0.22651699829101563, 0.22642994689941406, 0.22652006530761717, 0.22658355712890624, 0.22648832702636718, 0.22646067810058593, 0.22636134338378905, 0.2262917175292969, 0.22641766357421875, 0.22637362670898437, 0.226440185546875, 0.22636851501464844, 0.2264944610595703, 0.2264596405029297, 0.22644122314453125, 0.22647398376464845, 0.22640127563476561, 0.22637977600097656, 0.22642892456054686, 0.22646067810058593, 0.22642994689941406, 0.22729216003417968, 0.2265856018066406, 0.22656614685058593, 0.22651187133789064, 0.22686924743652342, 0.22699212646484376, 0.22667776489257813, 0.22668083190917968, 0.22647091674804687, 0.226440185546875, 0.2262794189453125, 0.22660198974609375, 0.22650778198242189, 0.227240966796875, 0.22670130920410156, 0.2265927734375, 0.22639820861816407, 0.22648524475097656, 0.22650265502929687, 0.22666444396972657, 0.2264637451171875, 0.22673306274414062, 0.2266306610107422, 0.2264524841308594, 0.22670541381835937, 0.22680677795410156, 0.22637773132324218, 0.22640333557128905, 0.22649754333496094, 0.22653030395507812, 0.22647602844238282, 0.2265180206298828, 0.2267105255126953, 0.2264811553955078, 0.2263408660888672, 0.22684159851074218, 0.226840576171875, 0.227162109375, 0.22650572204589844, 0.22680064392089844, 0.47229953002929687, 0.22655386352539061, 0.22669209289550782, 0.22669107055664062, 0.22653849792480468, 0.226914306640625, 0.22678323364257813, 0.22689485168457033, 0.2268078155517578, 0.22660812377929687, 0.22647296142578124, 0.22665318298339843, 0.22659584045410155, 0.2267166748046875, 0.22655897521972657, 0.22654566955566408, 0.2271068115234375, 0.22682009887695312, 0.22680575561523436, 0.22656204223632812, 0.2264217529296875, 0.22652108764648438, 0.22662553405761718, 0.22673408508300782, 0.22695730590820312, 0.22662757873535155, 0.2265364532470703, 0.22667570495605469, 0.2265743408203125, 0.22657125854492188, 0.22663679504394532, 0.2263592987060547, 0.22638490295410157, 0.22639411926269531, 0.2264514617919922, 0.22640640258789063, 0.22657125854492188, 
0.22636134338378905, 0.22635008239746093, 0.22646681213378905, 0.2263756866455078, 0.22643507385253905, 0.226661376953125, 0.22654054260253906, 0.2264842224121094, 0.2266480712890625, 0.22652621459960937, 0.22654054260253906, 0.22649958801269532, 0.22644326782226562, 0.22654156494140626, 0.2265886688232422, 0.22666546630859374, 0.22688870239257813, 0.2266972198486328, 0.22649856567382812, 0.22649958801269532, 0.22640847778320314, 0.22657020568847655, 0.22655078125, 0.2271825866699219, 0.22659686279296876, 0.22644224548339845, 0.4732682189941406, 0.2267166748046875, 0.22669004821777344, 0.2266112060546875, 0.22709043884277344, 0.2265323486328125, 0.2266306610107422, 0.2265927734375, 0.22674636840820311, 0.22685285949707032, 0.22652723693847657, 0.2264954833984375, 0.22642381286621094, 0.22641253662109376, 0.22645452880859376, 0.2264248352050781, 0.22657125854492188, 0.22701670837402343, 0.22656716918945313, 0.2264268798828125, 0.22636441040039063, 0.22659379577636718, 0.22651596069335939, 0.22646476745605468, 0.22653543090820313, 0.22654873657226562, 0.22635110473632813, 0.2264453125, 0.22657740783691407, 0.22701568603515626, 0.22657331848144532, 0.226808837890625, 0.22729318237304688, 0.226840576171875, 0.22669004821777344, 0.22642994689941406, 0.22665216064453125, 0.226408447265625, 0.2264637451171875, 0.22659071350097656, 0.22760140991210936, 0.2266787872314453, 0.22653952026367188, 0.2264842224121094, 0.22630911254882813, 0.22652825927734374, 0.22639820861816407, 0.22657023620605468, 0.22674330139160156, 0.2271262664794922, 0.22665728759765624, 0.2265333709716797, 0.22666444396972657, 0.22674227905273436, 0.22675149536132813, 0.2265364532470703, 0.22639820861816407, 0.22652210998535155, 0.2266112060546875, 0.22665113830566405, 0.22656614685058593, 0.2264627227783203, 0.22642073059082032, 0.4729661560058594, 0.22653132629394532, 0.22665420532226563, 0.22658566284179688, 0.22666950988769533, 0.22666035461425782, 0.2266480712890625, 0.2267525177001953, 0.22648934936523438, 0.2266399688720703, 0.2264431610107422, 0.22651187133789064, 0.22709657287597657, 0.22754713439941407, 0.22637055969238282, 0.22647091674804687, 0.22653439331054687, 0.22654975891113283, 0.2265999298095703, 0.22654360961914063, 0.22677197265625, 0.22703718566894532, 0.2268784637451172, 0.22653543090820313, 0.22652210998535155, 0.22646681213378905, 0.2276433868408203, 0.2268784637451172, 0.2265886688232422, 0.22731878662109375, 0.2269306945800781, 0.22686822509765625, 0.22654464721679687, 0.22657945251464845, 0.2267484130859375, 0.22667263793945314, 0.22669209289550782, 0.22645350646972656, 0.22654566955566408, 0.22655282592773437, 0.22689996337890625, 0.2264698944091797, 0.22683544921875, 0.22653952026367188, 0.22666035461425782, 0.22665420532226563, 0.22662655639648438, 0.2269420166015625, 0.2271200714111328, 0.2266972198486328, 0.22657331848144532, 0.22654368591308593, 0.22663877868652343, 0.22703616333007812, 0.22663372802734374, 0.22663270568847657, 0.22767308044433593, 0.22783692932128907, 0.22670130920410156, 0.2265927734375, 0.2266378173828125, 0.22650469970703124, 0.22659584045410155]",tokens/s,4.344917232163973,,,,, 
4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,l,l,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/l/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a30da-34480b60495b2e472eb2206f;8191fc8b-9f52-49ce-a3a9-f3fb27211d8f) Repository Not Found for url: https://huggingface.co/l/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: l is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Meta-Llama-3-70B,meta-llama/Meta-Llama-3-70B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-32B,Qwen/Qwen1.5-32B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,4421.566464,24111.480832,0.0,23465.033728,21691.057664,s,10,26.201972412109374,2.6201972412109376,0.0014392080916773324,2.620363525390625,2.621498388671875,2.622221728515625,2.622800400390625,"[2.62030126953125, 2.620416259765625, 2.620310791015625, 2.621337646484375, 2.620593017578125, 2.619945556640625, 2.622945068359375, 2.618053466796875, 2.6175732421875, 2.62049609375]",tokens/s,97.70256833095829,kWh,3.092599951558643e-05,1.6948578058490964e-05,0.00015162839908040572,0.00019950297665448312,tokens/kWh,1283188.874135765,MB,4421.566464,24111.480832,0.0,23465.033728,21890.213376,s,10,1554.3346406249998,155.43346406249998,0.013784362336340894,155.428859375,155.4548328125,155.45787734375,155.46031296875,"[155.44153125, 155.42890625, 155.45415625, 155.426875, 155.41909375, 155.416984375, 155.4288125, 155.42403125, 155.460921875, 155.433328125]",tokens/s,0.4053181236099044,kWh,0.0018348197203377882,0.0010056435987538315,0.008919082301926796,0.011759545621018413,tokens/kWh,5357.349852650515,,s,629,1575.449455322266,2.504689118159405,0.3111804707409938,2.46706884765625,2.4683868164062504,2.4691709960937502,5.086480859375,"[2.467560546875, 2.468384765625, 2.46702587890625, 2.467464111328125, 2.46763427734375, 2.468833251953125, 2.467610595703125, 2.46744580078125, 2.467310546875, 2.4678994140625, 2.46620263671875, 2.466724853515625, 2.46737109375, 2.46790234375, 2.466193359375, 2.46736279296875, 2.4661279296875, 2.467852294921875, 2.469380126953125, 2.468475830078125, 2.467101806640625, 2.46930322265625, 2.4674384765625, 2.46818505859375, 2.46793115234375, 2.468096923828125, 2.466873291015625, 2.466653076171875, 2.466144287109375, 2.467576904296875, 2.466427978515625, 2.46687744140625, 2.467862548828125, 2.468832275390625, 2.46677001953125, 2.46746533203125, 2.4666767578125, 2.467177490234375, 2.4664228515625, 2.466610107421875, 2.466058349609375, 2.46723388671875, 2.470784912109375, 2.466979736328125, 2.466400146484375, 2.4673740234375, 2.467078125, 2.466891845703125, 2.46717041015625, 2.467664794921875, 2.4676455078125, 2.467311767578125, 2.4667421875, 2.46812060546875, 2.467968017578125, 2.467040283203125, 2.466512939453125, 2.4665068359375, 2.467083251953125, 2.466418701171875, 2.465967041015625, 2.466157470703125, 5.087494140625, 2.466189208984375, 2.466884521484375, 2.46639501953125, 2.46648828125, 2.466607177734375, 2.467287109375, 2.46839599609375, 2.46702587890625, 2.467313720703125, 2.467323974609375, 2.467330078125, 2.466697265625, 2.46641455078125, 2.4657080078125, 2.46809912109375, 2.46658984375, 2.467029052734375, 2.46753076171875, 2.468769775390625, 2.467987548828125, 2.468222900390625, 2.4690595703125, 2.469578857421875, 2.46847900390625, 2.4680283203125, 2.46656298828125, 2.46801513671875, 2.46639013671875, 2.4670986328125, 2.46615234375, 2.46773974609375, 
2.4667216796875, 2.46727685546875, 2.4663193359375, 2.46763525390625, 2.46688671875, 2.467874755859375, 2.46723779296875, 2.46824853515625, 2.466218017578125, 2.466986083984375, 2.46668505859375, 2.4669931640625, 2.4665302734375, 2.465965087890625, 2.468173828125, 2.467091552734375, 2.4666787109375, 2.466904052734375, 2.466720703125, 2.466922607421875, 2.46651806640625, 2.466711669921875, 2.46662158203125, 2.4666767578125, 2.466606201171875, 2.466504638671875, 2.46702294921875, 2.467114990234375, 2.46698291015625, 2.466469970703125, 2.46719384765625, 5.0873857421875, 2.46711181640625, 2.4676669921875, 2.4682373046875, 2.46677294921875, 2.465900634765625, 2.466285400390625, 2.46675244140625, 2.4686142578125, 2.467852294921875, 2.468147216796875, 2.469138427734375, 2.468021240234375, 2.46839501953125, 2.467322998046875, 2.46803857421875, 2.4694794921875, 2.467284912109375, 2.467267578125, 2.46729736328125, 2.471918701171875, 2.4674765625, 2.467620849609375, 2.468518798828125, 2.46965966796875, 2.468581298828125, 2.467287109375, 2.4676025390625, 2.46909130859375, 2.4678798828125, 2.466585693359375, 2.46625390625, 2.468111328125, 2.466431884765625, 2.4665302734375, 2.46625390625, 2.46766796875, 2.46662646484375, 2.46632763671875, 2.466314208984375, 2.468085693359375, 2.467567626953125, 2.47077490234375, 2.466901123046875, 2.46776416015625, 2.466997314453125, 2.468117431640625, 2.471785400390625, 2.468166748046875, 2.46658154296875, 2.467800048828125, 2.4673310546875, 2.46755029296875, 2.466908203125, 2.4666357421875, 2.4657724609375, 2.4668466796875, 2.46647509765625, 2.46641455078125, 2.4653935546875, 2.46607861328125, 2.465657958984375, 2.46717041015625, 5.08862353515625, 2.46618017578125, 2.46677001953125, 2.465965087890625, 2.4666962890625, 2.46651806640625, 2.466512939453125, 2.4657919921875, 2.46704833984375, 2.466091064453125, 2.4665322265625, 2.466198486328125, 2.466378662109375, 2.4663388671875, 2.466697265625, 2.4661689453125, 2.467324951171875, 2.46691748046875, 2.46706884765625, 2.46692041015625, 2.466785400390625, 2.467284912109375, 2.467230712890625, 2.4668681640625, 2.4700732421875, 2.46681396484375, 2.466840576171875, 2.46628857421875, 2.4691845703125, 2.469140380859375, 2.468798583984375, 2.46765869140625, 2.4666748046875, 2.46800390625, 2.466842529296875, 2.46696044921875, 2.46841552734375, 2.46970166015625, 2.468170654296875, 2.467095458984375, 2.46616259765625, 2.468423583984375, 2.46754296875, 2.467493896484375, 2.469568603515625, 2.467686279296875, 2.467119140625, 2.46765576171875, 2.4675615234375, 2.46753076171875, 2.46786962890625, 2.467504150390625, 2.466217041015625, 2.46670751953125, 2.466275390625, 2.46649951171875, 2.465642578125, 2.4665927734375, 2.466114501953125, 2.46563134765625, 2.466107421875, 2.466840576171875, 2.46594775390625, 5.0872607421875, 2.466586669921875, 2.467085205078125, 2.466087890625, 2.46624658203125, 2.468030517578125, 2.466228271484375, 2.46631201171875, 2.465871826171875, 2.466620361328125, 2.466470947265625, 2.4659375, 2.465469482421875, 2.467073974609375, 2.467386474609375, 2.466375732421875, 2.46649658203125, 2.469547119140625, 2.46750634765625, 2.466642822265625, 2.46601123046875, 2.466673583984375, 2.466423828125, 2.466227294921875, 2.466232421875, 2.467124267578125, 2.466873291015625, 2.46641162109375, 2.466154541015625, 2.46636962890625, 2.466863037109375, 2.46698291015625, 2.467325927734375, 2.46662744140625, 2.4677744140625, 2.466999267578125, 2.467263427734375, 2.466723876953125, 2.46856201171875, 2.46679150390625, 2.466046875, 
2.46635107421875, 2.46673828125, 2.467541015625, 2.4671416015625, 2.467443603515625, 2.466511962890625, 2.467031982421875, 2.46706787109375, 2.4669921875, 2.468369384765625, 2.467056640625, 2.46666455078125, 2.467033203125, 2.468075439453125, 2.468128662109375, 2.466417724609375, 2.471729248046875, 2.467071044921875, 2.466891845703125, 2.46647509765625, 2.466440185546875, 2.46868798828125, 5.08691357421875, 2.466028564453125, 2.4677333984375, 2.467010498046875, 2.467205078125, 2.4664013671875, 2.4672451171875, 2.4667392578125, 2.46585546875, 2.466406494140625, 2.46685693359375, 2.4661708984375, 2.4663828125, 2.46592822265625, 2.466884521484375, 2.466239501953125, 2.46583203125, 2.465574951171875, 2.46738330078125, 2.467092529296875, 2.4664677734375, 2.467158935546875, 2.467737548828125, 2.4660244140625, 2.4675830078125, 2.46628662109375, 2.467053466796875, 2.4666142578125, 2.46684375, 2.466788330078125, 2.467851318359375, 2.46658056640625, 2.468338623046875, 2.467275634765625, 2.46681201171875, 2.466661376953125, 2.466303955078125, 2.46763720703125, 2.473092041015625, 2.46727685546875, 2.467389404296875, 2.467167236328125, 2.467786865234375, 2.4663837890625, 2.4663388671875, 2.465721435546875, 2.467124267578125, 2.4679638671875, 2.46670751953125, 2.46626416015625, 2.467136474609375, 2.466255859375, 2.4674990234375, 2.466595947265625, 2.466490478515625, 2.4665712890625, 2.46690087890625, 2.466754638671875, 2.4670341796875, 2.467210205078125, 2.467547119140625, 2.467244140625, 2.466747314453125, 5.09081201171875, 2.467222412109375, 2.46835205078125, 2.467926025390625, 2.4673740234375, 2.467116943359375, 2.46747021484375, 2.467093505859375, 2.466975830078125, 2.46742626953125, 2.4675400390625, 2.467567626953125, 2.4670546875, 2.467200927734375, 2.467986328125, 2.46847900390625, 2.468297607421875, 2.466345947265625, 2.467420166015625, 2.468263916015625, 2.46740283203125, 2.466755615234375, 2.467157958984375, 2.471689208984375, 2.467622802734375, 2.4668671875, 2.467239013671875, 2.46835205078125, 2.467516357421875, 2.46651904296875, 2.466069580078125, 2.46681591796875, 2.466124755859375, 2.465919921875, 2.465881103515625, 2.46696240234375, 2.46622607421875, 2.46639208984375, 2.465967041015625, 2.46714990234375, 2.466946044921875, 2.466486328125, 2.465672119140625, 2.466734130859375, 2.466515869140625, 2.46664404296875, 2.4666142578125, 2.4672265625, 2.466231201171875, 2.46652001953125, 2.4661728515625, 2.46692041015625, 2.466157470703125, 2.46626611328125, 2.46582470703125, 2.466809814453125, 2.467745849609375, 2.4677080078125, 2.466193359375, 2.4672470703125, 2.467043212890625, 2.47162060546875, 2.4657490234375, 5.08459130859375, 2.46683544921875, 2.46649755859375, 2.467766357421875, 2.467400634765625, 2.467850341796875, 2.467080078125, 2.46782470703125, 2.466931640625, 2.46681298828125, 2.466908203125, 2.467493896484375, 2.466670654296875, 2.46761767578125, 2.467306396484375, 2.46744873046875, 2.46968310546875, 2.467335205078125, 2.466291748046875, 2.46672900390625, 2.4669912109375, 2.468328369140625, 2.466957275390625, 2.46684375, 2.467378173828125, 2.4669912109375, 2.467179443359375, 2.46784521484375, 2.468820068359375, 2.466711669921875, 2.465966064453125, 2.465700927734375, 2.4670791015625, 2.46738427734375, 2.467485595703125, 2.4666328125, 2.4676455078125, 2.466926513671875, 2.46613720703125, 2.46624755859375, 2.4671220703125, 2.46669921875, 2.465594482421875, 2.46590771484375, 2.466947021484375, 2.466668701171875, 2.466886474609375, 2.46719189453125, 2.46757080078125, 2.466482177734375, 
2.46595068359375, 2.46605712890625, 2.467263427734375, 2.466189208984375, 2.467197998046875, 2.46523095703125, 2.466417724609375, 2.466122802734375, 2.467242919921875, 2.466663330078125, 2.46723388671875, 2.466820068359375, 2.472427490234375, 5.0853681640625, 2.46607568359375, 2.46763818359375, 2.466788330078125, 2.467725341796875, 2.466460693359375, 2.467099609375, 2.46757177734375, 2.46760546875, 2.466769775390625, 2.46808056640625, 2.46717236328125, 2.468547607421875, 2.46778369140625, 2.4672236328125, 2.466810791015625, 2.467812255859375, 2.468862060546875, 2.467946533203125, 2.467600341796875, 2.46891015625, 2.46693994140625, 2.46805810546875, 2.46829052734375, 2.469150634765625, 2.468052978515625, 2.4687646484375, 2.467143798828125, 2.467037109375, 2.466817138671875, 2.468148193359375, 2.467322998046875, 2.467407958984375, 2.468336669921875, 2.468115478515625, 2.4671845703125, 2.4663388671875, 2.4676884765625, 2.467588134765625, 2.466388916015625, 2.466503662109375, 2.466964599609375, 2.4676494140625, 2.466975830078125, 2.466547607421875, 2.46734033203125, 2.467566650390625, 2.467642333984375, 2.466969482421875, 2.468360107421875, 2.467146728515625, 2.466821044921875, 2.466783203125, 2.46778466796875, 2.46734130859375, 2.467453857421875, 2.467220458984375, 2.468514892578125, 2.467493896484375, 2.4675439453125, 2.467715087890625, 2.47457275390625, 2.468958251953125, 5.0886162109375, 2.467864501953125, 2.469477294921875, 2.468490234375, 2.4670166015625, 2.467751953125, 2.46676171875, 2.466572265625, 2.467084228515625, 2.467091552734375, 2.4673525390625, 2.4678388671875, 2.46633154296875, 2.467618896484375, 2.46672900390625, 2.466831298828125, 2.4672685546875, 2.467834716796875, 2.46698193359375, 2.467683349609375, 2.46766796875, 2.46748974609375, 2.46799560546875, 2.4667822265625, 2.4664052734375, 2.467306396484375, 2.469897216796875, 2.46721630859375, 2.46658251953125, 2.46739453125, 2.467197998046875, 2.468125732421875, 2.466786376953125, 2.467189697265625, 2.467324951171875, 2.467589111328125, 2.46772314453125, 2.468556884765625, 2.466018310546875, 2.466438232421875, 2.467070068359375, 2.466093017578125, 2.466754638671875, 2.46706884765625, 2.46609521484375, 2.467577880859375, 2.46765771484375, 2.468675537109375, 2.46646875, 2.466482177734375, 2.466712646484375, 2.465977294921875, 2.46544384765625, 2.466545654296875, 2.466736083984375, 2.467407958984375, 2.466906005859375, 2.466165771484375, 2.46822900390625, 2.4675185546875, 2.466716552734375, 2.4658984375, 2.466937744140625]",tokens/s,0.3992511456810495,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-72B,Qwen/Qwen2-beta-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File 
""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,s,s,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/s/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a2c11-618653220aebb96e2d5d7e0c;c65b0433-f967-4980-9e86-0b76ec94d27e) Repository Not Found for url: https://huggingface.co/s/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: s is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,B,B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 
1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/B/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a32e0-34e6ae447fbbd44e1703448d;cdfc422c-0226-4f9b-87e0-e407aeaa5e83) Repository Not Found for url: https://huggingface.co/B/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: B is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-14B,Qwen/Qwen2-beta-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,3905.671168,12732.33408,0.0,12085.886976,11337.501696,s,10,10.970613281250001,1.097061328125,0.0017840508518984434,1.097439453125,1.0989015869140624,1.0993332397460938,1.0996785620117187,"[1.0981041259765625, 1.099764892578125, 1.0942474365234376, 1.09466943359375, 1.09612890625, 1.0954654541015625, 1.0967747802734376, 1.0988056640625, 1.098486328125, 1.098166259765625]",tokens/s,233.35067369253872,kWh,1.2926500191291174e-05,7.083296092205274e-06,6.255157781898113e-05,8.256137410247758e-05,tokens/kWh,3100723.5863376665,MB,3909.746688,12732.33408,0.0,12085.886976,11686.806016,s,10,636.987296875,63.6987296875,0.008792231338471245,63.69969140625,63.70640390625,63.70996171875,63.71280796875,"[63.682359375, 63.687671875, 63.70561328125, 63.70497265625, 63.69943359375, 63.69994921875, 63.70396484375, 63.71351953125, 63.6979921875, 
63.6918203125]",tokens/s,0.9890307123716925,kWh,0.0007521740069488685,0.00041225789324771534,0.003629864737222821,0.0047942966374194046,tokens/kWh,13140.613684244328,,s,629,645.8482131958002,1.0267857125529425,0.13035148276579694,1.0110596313476563,1.01170830078125,1.0120460571289063,2.107026708984375,"[1.0111242065429686, 1.0104995727539063, 1.0103193359375, 1.0103952026367187, 1.0104954223632812, 1.0104678955078126, 1.0100704956054687, 1.0106204223632813, 1.0101810913085938, 1.0110658569335937, 1.0104442749023437, 1.0103756713867187, 1.0101524658203125, 1.0103828735351563, 1.0101749877929687, 1.0103490600585938, 1.0100950927734376, 1.0109429931640626, 1.0103602905273437, 1.0109215698242187, 1.0110667724609375, 1.0112665405273438, 1.0110863647460937, 1.01104638671875, 1.0108724365234374, 1.0105343017578126, 1.0103214111328125, 1.010872314453125, 1.0106912841796876, 1.0113380737304687, 1.01102490234375, 1.011188720703125, 1.0115625, 1.0118072509765625, 1.011420166015625, 1.0109522094726562, 1.010282470703125, 1.0109522094726562, 1.0106019897460938, 1.0113290405273438, 1.0105897216796875, 1.0110146484375, 1.0105538330078125, 1.0109419555664063, 1.0105497436523438, 1.0111878051757812, 1.0105476684570311, 1.0111273193359376, 1.0112327880859375, 1.0111181640625, 1.0113422241210936, 1.0108223266601561, 1.0107902221679688, 1.0107567749023438, 1.0107431030273437, 1.0107125854492187, 1.0108497924804687, 1.0110812377929688, 1.0113054809570312, 1.011472412109375, 1.010951171875, 1.011072998046875, 2.110793701171875, 1.0105712890625, 1.0109173583984374, 1.0111078491210939, 1.0105538940429688, 1.0114539184570313, 1.0104873046875, 1.0103736572265625, 1.01060302734375, 1.0104473876953124, 1.0103705444335938, 1.0103173217773438, 1.0103797607421876, 1.0106593017578125, 1.0105569458007813, 1.0109276123046875, 1.0109112548828125, 1.0105016479492188, 1.0110904541015624, 1.0104453125, 1.0106286010742187, 1.0105743408203125, 1.0106060791015625, 1.0110525512695312, 1.0107782592773438, 1.0118408813476563, 1.0113218383789062, 1.0111395874023437, 1.0107064208984375, 1.010524169921875, 1.0105466918945312, 1.0102415161132812, 1.010682861328125, 1.0110791625976563, 1.0108632202148438, 1.01125830078125, 1.0108221435546876, 1.0107473754882812, 1.0111160278320312, 1.0105282592773437, 1.0108026733398439, 1.0104678344726563, 1.0108098754882813, 1.0106849365234376, 1.0112696533203125, 1.0114283447265624, 1.01136181640625, 1.01125634765625, 1.0113484497070313, 1.0112225341796874, 1.0113013916015625, 1.0110914306640626, 1.0109788208007813, 1.011293212890625, 1.0114918212890625, 1.0115665893554688, 1.0111375122070312, 1.0111918334960937, 1.0110986328125, 1.0106838989257811, 1.011178466796875, 1.01096142578125, 1.0111539306640625, 2.106599365234375, 1.0114857177734375, 1.0112235717773437, 1.011198974609375, 1.01125732421875, 1.010946044921875, 1.0106123657226562, 1.010948974609375, 1.0108016357421874, 1.0105702514648438, 1.011140625, 1.0107391967773438, 1.0105272216796874, 1.010386962890625, 1.0104515991210938, 1.010242431640625, 1.0104063720703125, 1.010171875, 1.0105518188476563, 1.0103695068359375, 1.01096142578125, 1.0108364868164061, 1.0114160766601563, 1.0112081909179687, 1.0110771484375, 1.01035107421875, 1.0108743896484376, 1.01146728515625, 1.0117959594726562, 1.0120355834960937, 1.012463623046875, 1.0119403686523438, 1.01194140625, 1.0119802856445312, 1.011323974609375, 1.0110104370117188, 1.0110330810546875, 1.0113382568359375, 1.0116249389648437, 1.0108795166015625, 1.01194140625, 1.0112030639648437, 
1.0113484497070313, 1.011093505859375, 1.0115000610351563, 1.0112604370117189, 1.0109788208007813, 1.0104954833984374, 1.011493896484375, 1.01127783203125, 1.0117058715820313, 1.0108528442382811, 1.0114898071289062, 1.0111221923828124, 1.0114129638671876, 1.011178466796875, 1.0114631958007811, 1.0113863525390625, 1.0115481567382814, 1.01125732421875, 1.0124105834960937, 1.0113810424804688, 1.0117324829101562, 2.10709814453125, 1.0106634521484374, 1.0110371704101562, 1.0109081420898438, 1.0120171508789062, 1.0121226196289062, 1.011262451171875, 1.0106112060546875, 1.0108251953125, 1.0108651733398437, 1.0108108520507812, 1.0108057861328126, 1.0110238647460938, 1.0108446655273438, 1.01113037109375, 1.0110238647460938, 1.0114150390625, 1.01075146484375, 1.0109603881835938, 1.0105835571289064, 1.010745361328125, 1.0105211181640625, 1.0108262329101563, 1.01054052734375, 1.0108231811523438, 1.0112245483398437, 1.0115286865234374, 1.0111610717773438, 1.0113402709960937, 1.0108292846679687, 1.0113024291992188, 1.0110156860351562, 1.011251220703125, 1.0109337768554687, 1.0112122802734376, 1.0112041015625, 1.0106275634765625, 1.0104483642578126, 1.0109224853515626, 1.0123694458007813, 1.0121512451171875, 1.0115277099609374, 1.011831787109375, 1.0113003540039063, 1.0119905395507813, 1.0121932983398438, 1.01144677734375, 1.0107801513671875, 1.011119140625, 1.0112194213867187, 1.0110924682617188, 1.0110105590820313, 1.0112767944335936, 1.0110873413085937, 1.0113557739257812, 1.0116627807617187, 1.0114488525390626, 1.01161474609375, 1.0121482543945313, 1.0110945434570313, 1.0111918334960937, 1.0114703369140625, 1.011420166015625, 2.107243408203125, 1.011304443359375, 1.0116792602539062, 1.0110904541015624, 1.0111897583007812, 1.0108170166015624, 1.0107166748046874, 1.010555908203125, 1.0108446655273438, 1.0105784301757812, 1.0112839965820313, 1.0110791625976563, 1.011198974609375, 1.0106951904296875, 1.0111498413085938, 1.0108477172851562, 1.0110279541015625, 1.0106736450195313, 1.01078125, 1.0110596313476563, 1.0108917846679688, 1.0110576782226564, 1.0108016357421874, 1.01058251953125, 1.0113638305664063, 1.01103515625, 1.0112214965820312, 1.0109030151367187, 1.0114221801757812, 1.01172021484375, 1.011646484375, 1.0109634399414062, 1.0110392456054687, 1.01072998046875, 1.0113239135742187, 1.0107545776367188, 1.0113812255859376, 1.0107801513671875, 1.0113116455078126, 1.011267578125, 1.0112327880859375, 1.0109522094726562, 1.0110904541015624, 1.0106736450195313, 1.0117355346679688, 1.0115389404296875, 1.0110474243164063, 1.0108436279296875, 1.01097265625, 1.0110955810546876, 1.011800048828125, 1.0115277099609374, 1.0110167236328125, 1.0107422485351563, 1.0111610717773438, 1.0109552612304689, 1.01119384765625, 1.0112645263671876, 1.0109450073242188, 1.0111027221679687, 1.0116802368164062, 1.0114508666992188, 1.0115563354492187, 2.106843017578125, 1.01136181640625, 1.0113515625, 1.0108784790039063, 1.0107105102539062, 1.0108948364257813, 1.0118154296875, 1.0109542236328124, 1.0106787719726562, 1.0108887329101564, 1.0112225341796874, 1.0112604370117189, 1.0115020751953125, 1.010966552734375, 1.0110965576171875, 1.0111488037109375, 1.0121779174804688, 1.01123583984375, 1.0113885498046875, 1.011718017578125, 1.0113597412109374, 1.0111610717773438, 1.0113341674804688, 1.0110709838867187, 1.01110986328125, 1.0114406127929687, 1.0113106079101561, 1.0103971557617188, 1.0105989379882812, 1.0106224365234375, 1.010735107421875, 1.0104473876953124, 1.010703369140625, 1.0104063720703125, 1.01077099609375, 
1.0111979370117188, 1.01096240234375, 1.0108262329101563, 1.011472412109375, 1.011409912109375, 1.0115133666992187, 1.0109255981445313, 1.0116690063476563, 1.0109921264648438, 1.0113484497070313, 1.011146728515625, 1.0115604248046874, 1.0110126342773438, 1.0111477661132813, 1.0111273193359376, 1.01097265625, 1.0108907470703126, 1.0107822265625, 1.0106972045898437, 1.0111293334960938, 1.011146728515625, 1.0112236938476562, 1.0110370483398436, 1.0111190795898437, 1.0111826171875, 1.01117236328125, 1.0109869995117187, 1.0112276611328126, 2.108168212890625, 1.0113248901367187, 1.0116497192382812, 1.0111066284179688, 1.0111590576171876, 1.010862060546875, 1.0108671875, 1.0107658081054687, 1.0111181030273437, 1.010798583984375, 1.0113054809570312, 1.0110842895507812, 1.01054052734375, 1.0102753295898437, 1.0105231323242188, 1.0106941528320312, 1.0107473754882812, 1.0110699462890624, 1.01083544921875, 1.0106234741210938, 1.0105692138671876, 1.0113546142578125, 1.0107955322265625, 1.01106689453125, 1.011330078125, 1.0107811889648437, 1.010820068359375, 1.0105374755859375, 1.0116658935546874, 1.011794921875, 1.0121381225585937, 1.0115756225585937, 1.0113126220703126, 1.0110075073242188, 1.0110658569335937, 1.01071875, 1.0106624145507812, 1.0105538330078125, 1.0108600463867188, 1.0104791259765624, 1.0111047973632812, 1.010713623046875, 1.0108712768554688, 1.0103910522460937, 1.0111826171875, 1.0108528442382811, 1.0117273559570312, 1.0112481079101563, 1.0113095703125, 1.0112542724609375, 1.0117642211914062, 1.0118338623046874, 1.0117969970703125, 1.0115205078125, 1.0120662841796875, 1.0122158203125, 1.011726318359375, 1.0116546630859375, 1.011726318359375, 1.0113873901367187, 1.0115338134765626, 1.0122117309570313, 1.0120550537109374, 2.11171728515625, 1.0110648193359375, 1.0112655639648438, 1.011040283203125, 1.0112337646484375, 1.011103759765625, 1.0117447509765625, 1.011694580078125, 1.0115451049804687, 1.0108006591796874, 1.0108999633789062, 1.0106654663085937, 1.0113208618164062, 1.0110341186523437, 1.0105753784179687, 1.0110699462890624, 1.0127493286132812, 1.0114774780273437, 1.0117990112304687, 1.0114754638671875, 1.01243701171875, 1.0126602172851562, 1.0127821044921874, 1.012316162109375, 1.0116639404296874, 1.0120989990234375, 1.0121307983398438, 1.0107197265625, 1.0107975463867187, 1.0107320556640624, 1.0111702880859375, 1.0104524536132813, 1.0106388549804688, 1.01058251953125, 1.0108671875, 1.0108385009765626, 1.0115369262695313, 1.0105947875976562, 1.010924560546875, 1.0109214477539064, 1.0113648681640626, 1.0106675415039061, 1.0112532348632812, 1.0106183471679688, 1.0113659057617188, 1.0111826171875, 1.0120530395507812, 1.0109584350585938, 1.0110103759765625, 1.0113085327148437, 1.0110975952148438, 1.0110146484375, 1.01137109375, 1.0108394775390626, 1.0119804077148438, 1.01148046875, 1.0113474731445313, 1.0114826049804688, 1.0118276977539062, 1.0114006958007813, 1.011209228515625, 1.0110105590820313, 1.0114006958007813, 2.110498779296875, 1.0119219360351563, 1.01136181640625, 1.0108313598632812, 1.0110064697265626, 1.0115112915039062, 1.0112849731445313, 1.011282958984375, 1.0111334228515625, 1.01097265625, 1.011146728515625, 1.0113351440429688, 1.0111365356445312, 1.010808837890625, 1.010713623046875, 1.0107698974609376, 1.0108856201171874, 1.0107218017578126, 1.01076171875, 1.0107422485351563, 1.0111477661132813, 1.011072998046875, 1.01098291015625, 1.0104760131835937, 1.0106972045898437, 1.01064501953125, 1.0107012939453126, 1.010724853515625, 1.0107740478515626, 1.0106009521484376, 
1.010820068359375, 1.0108784790039063, 1.0107863159179689, 1.0108590698242188, 1.01104736328125, 1.0110914306640626, 1.0109132690429687, 1.0108549194335938, 1.0113065795898437, 1.0108507690429687, 1.0111047973632812, 1.0121011352539062, 1.0114979858398438, 1.0112440185546876, 1.0114396362304687, 1.0111139526367188, 1.0110658569335937, 1.01098291015625, 1.011177490234375, 1.0114918212890625, 1.0113802490234376, 1.0121666259765625, 1.0110453491210938, 1.0111324462890625, 1.0114744262695312, 1.0106736450195313, 1.0109685668945312, 1.0110310668945313, 1.0110238647460938, 1.0108549194335938, 1.0109911499023438, 1.0115542602539063, 1.010951171875, 2.10921875, 1.0104708862304688, 1.0106941528320312, 1.0109368286132812, 1.0107924194335938, 1.0110361328125, 1.0113505249023438, 1.0111631469726563, 1.010882568359375, 1.011051513671875, 1.0112184448242187, 1.0114017333984375, 1.0117243041992188, 1.0111027221679687, 1.0108477172851562, 1.0110914306640626, 1.0114437255859374, 1.01085595703125, 1.0109983520507813, 1.0107094116210937, 1.0110496215820313, 1.010912109375, 1.0108661499023437, 1.010777099609375, 1.0107576904296875, 1.0106285400390624, 1.0111324462890625, 1.011009521484375, 1.01085693359375, 1.0107545776367188, 1.0111273193359376, 1.010572265625, 1.0108016357421874, 1.0107443237304687, 1.0111344604492187, 1.010861083984375, 1.0115419921875, 1.0107218017578126, 1.0108344116210937, 1.0103900146484375, 1.0109389038085939, 1.0106685180664063, 1.0109644775390625, 1.0104258422851562, 1.0109030151367187, 1.0110167236328125, 1.0119915771484376, 1.011103759765625, 1.0113802490234376, 1.0111365356445312, 1.0113638305664063, 1.0108446655273438, 1.0109859619140624, 1.01060400390625, 1.0110167236328125, 1.0108129272460937, 1.0115399780273437, 1.0108231811523438, 1.0112481079101563, 1.0108765869140626, 1.0110595703125, 1.0108630981445312, 1.0109900512695313]",tokens/s,0.9739130451218064,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,mistralai/Mistral-7B-v0.1,mistralai/Mistral-7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2173.579264,6201.802752,0.0,5555.355648,5274.11712,s,10,6.188704040527344,0.6188704040527343,0.001630006512575581,0.6182249450683593,0.6205656127929687,0.6214197998046874,0.6221031494140624,"[0.6199492797851562, 0.6180035400390625, 0.6172139282226563, 0.6179977416992187, 0.618110595703125, 0.6165477905273438, 0.6183392944335937, 0.6222739868164062, 0.61989208984375, 0.6203757934570312]",tokens/s,413.65687924896486,kWh,7.2883394029405375e-06,3.9920504876135965e-06,3.5213917060001095e-05,4.649430695055522e-05,tokens/kWh,5506050.456290174,MB,2173.579264,6201.802752,0.0,5555.355648,5324.909056,s,10,366.72262109375004,36.672262109375005,0.03669260702690663,36.653501953125,36.74228671875,36.744877734375,36.746950546875006,"[36.7417109375, 36.74746875, 36.65041015625, 36.64756640625, 36.6498203125, 36.6483046875, 36.65363671875, 36.6533671875, 36.662453125, 
36.6678828125]",tokens/s,1.7179196585174519,kWh,0.0004329811916251978,0.000237312961880707,0.002063029955978196,0.0027333241094841007,tokens/kWh,23048.858267997675,,s,629,371.7092334594727,0.5909526764061569,0.0735102746996128,0.581843994140625,0.5835925537109375,0.584360986328125,1.1999008935546875,"[0.5815029907226562, 0.58220849609375, 0.58231396484375, 0.5818121948242188, 0.581771240234375, 0.581728271484375, 0.581939208984375, 0.5820538940429687, 0.5821102294921875, 0.581634033203125, 0.5816616821289062, 0.5819156494140625, 0.581291015625, 0.5822781372070313, 0.5820303344726563, 0.5819361572265624, 0.581734375, 0.5818613891601563, 0.5819801635742188, 0.582466552734375, 0.5818736572265625, 0.581818359375, 0.5841039428710938, 0.58378955078125, 0.584427490234375, 0.58435791015625, 0.5840650024414062, 0.5840220336914063, 0.584052734375, 0.584026123046875, 0.5835888671875, 0.5834075927734375, 0.583383056640625, 0.5840967407226563, 0.5844940795898438, 0.5843302612304687, 0.5861007080078126, 0.5830584106445312, 0.5844940795898438, 0.5844203491210938, 0.5834240112304687, 0.58357861328125, 0.5838602294921875, 0.5845339965820312, 0.5830901489257813, 0.5837598876953125, 0.5830502319335937, 0.58431591796875, 0.5842994995117188, 0.5837005004882813, 0.5831137084960938, 0.5829529418945313, 0.5845821533203125, 0.5848340454101563, 0.5842728881835938, 0.5823283081054688, 0.5856133422851563, 0.5826621704101562, 0.5840762939453125, 0.5840148315429687, 0.5838244018554688, 0.583173095703125, 1.2010567626953126, 0.5827809448242187, 0.58383154296875, 0.58437939453125, 0.5838694458007813, 0.58402099609375, 0.5823805541992187, 0.5836072998046875, 0.5824061279296875, 0.5832857666015625, 0.5830154418945312, 0.5828423461914063, 0.5844193115234375, 0.583710693359375, 0.5833215942382812, 0.5818582763671875, 0.5821634521484375, 0.5816187133789062, 0.5839032592773438, 0.5848248291015625, 0.5831044921875, 0.5835745239257812, 0.5840752563476562, 0.58448486328125, 0.5853388671875, 0.5839749145507812, 0.5822197875976562, 0.5817006225585938, 0.581855224609375, 0.5835755615234375, 0.5830338745117187, 0.5852354736328125, 0.584363037109375, 0.5837445068359375, 0.58349365234375, 0.58421142578125, 0.585006103515625, 0.5831065673828125, 0.5816985473632813, 0.5817743530273437, 0.5817098388671875, 0.5845473022460937, 0.5823252563476562, 0.5827501831054688, 0.5815951538085937, 0.5826806030273437, 0.5863434448242187, 0.5844859008789063, 0.5844592895507813, 0.584052734375, 0.5841151733398438, 0.5830891723632813, 0.5820047607421875, 0.5847992553710938, 0.5855047607421875, 0.582703125, 0.5821572875976563, 0.5824225463867188, 0.5825310668945313, 0.58260888671875, 0.5822177124023438, 0.5819852905273437, 0.5815715942382812, 1.1993958740234374, 0.5825433349609375, 0.5819463500976563, 0.5820119018554688, 0.581875732421875, 0.5813340454101562, 0.5827901611328125, 0.5819699096679688, 0.581412841796875, 0.5816166381835938, 0.5814906616210938, 0.5816627197265625, 0.5826836547851563, 0.5817589721679688, 0.5819658203125, 0.5817825317382812, 0.5814906616210938, 0.5817098388671875, 0.582255615234375, 0.5815142211914063, 0.5814528198242187, 0.5813032836914063, 0.5811845092773438, 0.5815469970703125, 0.5824461059570313, 0.5814261474609375, 0.58144970703125, 0.5815429077148437, 0.5813892822265625, 0.5822146606445312, 0.5814312744140625, 0.581739501953125, 0.5815429077148437, 0.5813053588867187, 0.5817047119140625, 0.58222900390625, 0.5819566040039063, 0.5814466552734375, 0.5814537963867188, 0.5814927368164062, 0.581306396484375, 0.5819422607421875, 
0.581738525390625, 0.5812838134765625, 0.5814528198242187, 0.5814886474609375, 0.5819361572265624, 0.5819166870117187, 0.5818040161132813, 0.5816985473632813, 0.5815736083984375, 0.5821388549804688, 0.5815367431640625, 0.582192138671875, 0.5816207275390625, 0.5815726318359375, 0.5814241333007812, 0.5814957885742188, 0.582708251953125, 0.581718017578125, 0.58149169921875, 0.5818613891601563, 0.5822637939453125, 1.2004178466796875, 0.5815613403320312, 0.5825177612304687, 0.5818429565429688, 0.5822146606445312, 0.5813770141601563, 0.5820886840820313, 0.5818121948242188, 0.5816586303710938, 0.5821511840820313, 0.58155517578125, 0.582277099609375, 0.5820498046875, 0.5820149536132813, 0.5815562133789063, 0.5816012573242187, 0.58168115234375, 0.5823037719726563, 0.5814323120117187, 0.5812920532226562, 0.5816371459960937, 0.5818091430664063, 0.5820221557617188, 0.5824839477539062, 0.5814835205078125, 0.581549072265625, 0.5811885986328125, 0.5815357666015625, 0.5823446655273438, 0.5814722290039063, 0.5815715942382812, 0.5821900634765625, 0.5817610473632813, 0.5821563110351563, 0.5818327026367187, 0.5814814453125, 0.5814220581054688, 0.5811742553710938, 0.5813903198242187, 0.5816350708007813, 0.581591064453125, 0.5814251708984375, 0.5813258056640624, 0.5812254638671875, 0.5822904052734375, 0.5816094970703125, 0.58172314453125, 0.58190234375, 0.5816903686523438, 0.581370849609375, 0.5813626708984375, 0.5817210693359375, 0.581518310546875, 0.5814149169921875, 0.5811558227539062, 0.581285888671875, 0.5823262939453125, 0.5823775024414063, 0.5814466552734375, 0.5813217163085938, 0.5813084106445312, 0.581475341796875, 0.5812244262695313, 1.2000972900390625, 0.5818818359375, 0.5816494140625, 0.5814087524414062, 0.5819525146484374, 0.5813268432617188, 0.5813104858398438, 0.5814395141601563, 0.5813831787109375, 0.5815377807617188, 0.5815398559570313, 0.5813851928710938, 0.5812265014648438, 0.5813248291015625, 0.581285888671875, 0.5814814453125, 0.58180712890625, 0.58216650390625, 0.5817006225585938, 0.581665771484375, 0.581285888671875, 0.5814927368164062, 0.5819913940429687, 0.5817118530273437, 0.58148046875, 0.5813248291015625, 0.5815654296875, 0.5814271850585937, 0.5818184204101563, 0.581286865234375, 0.5811497192382813, 0.58169140625, 0.5815244750976563, 0.5822689208984375, 0.5820487670898438, 0.5819586791992187, 0.5819259033203125, 0.5818101806640625, 0.582055908203125, 0.5828423461914063, 0.5817907104492187, 0.5819053955078125, 0.5818429565429688, 0.5818910522460937, 0.5818316650390625, 0.5823355102539063, 0.5815357666015625, 0.5818480834960937, 0.5816524658203125, 0.5814456176757813, 0.5823866577148438, 0.5815685424804687, 0.5816985473632813, 0.5815982055664063, 0.5815838623046875, 0.5819381713867188, 0.5823416137695312, 0.5835530395507813, 0.5818255615234375, 0.581444580078125, 0.5817507934570313, 0.58191259765625, 0.5820006103515625, 1.1981956787109376, 0.5812254638671875, 0.58144873046875, 0.5819668579101562, 0.5815818481445313, 0.5812940673828125, 0.5811456298828125, 0.5813626708984375, 0.5811497192382813, 0.58141796875, 0.5813995361328125, 0.5813135375976562, 0.58134423828125, 0.58155517578125, 0.5815787353515625, 0.5828638916015625, 0.5816575927734375, 0.5815111694335937, 0.5813616943359375, 0.5820538940429687, 0.5820927734375, 0.5819678955078125, 0.5817692260742188, 0.5816145629882813, 0.5816954956054687, 0.5814374389648438, 0.5818951416015625, 0.5821614379882812, 0.5817313232421875, 0.5813452758789063, 0.581381103515625, 0.5819310302734375, 0.5822279663085937, 0.5815029907226562, 
0.5817210693359375, 0.5818112182617188, 0.5817722778320312, 0.5819627685546875, 0.583836669921875, 0.5821726684570312, 0.5814364013671875, 0.5819780883789063, 0.5817313232421875, 0.5815214233398438, 0.5822955322265625, 0.5814681396484375, 0.5818040161132813, 0.58149169921875, 0.5819668579101562, 0.5824163818359375, 0.5818613891601563, 0.5814384765625, 0.581412841796875, 0.5816555786132812, 0.58149169921875, 0.5826007080078125, 0.5815326538085938, 0.5814886474609375, 0.5814763793945312, 0.5815326538085938, 0.5814620361328126, 0.5819299926757813, 0.5814906616210938, 1.2004239501953125, 0.5820057373046875, 0.5815797729492187, 0.5823160400390625, 0.5817426147460938, 0.5813114624023438, 0.5819115600585938, 0.5817979125976562, 0.5821522216796875, 0.5816882934570312, 0.5816770629882813, 0.581875732421875, 0.5819259033203125, 0.5819658203125, 0.582761474609375, 0.582223876953125, 0.5820333862304687, 0.5817845458984375, 0.5826467895507812, 0.5818388061523437, 0.5823938598632813, 0.5820436401367187, 0.581734375, 0.5818245239257812, 0.5811896362304687, 0.5817927856445313, 0.5819688720703124, 0.5817190551757813, 0.5813688354492188, 0.5810667724609375, 0.5812234497070312, 0.581843994140625, 0.5815643920898438, 0.58111181640625, 0.5813903198242187, 0.5819085083007812, 0.58183984375, 0.582118408203125, 0.5814773559570312, 0.5814302978515625, 0.5815347290039062, 0.581191650390625, 0.5816688842773438, 0.5815234375, 0.5817190551757813, 0.5818490600585937, 0.5814508056640625, 0.5813779907226563, 0.5813463134765625, 0.5819893798828125, 0.581802978515625, 0.5817333984375, 0.5816944580078125, 0.58290380859375, 0.582845458984375, 0.5820845947265625, 0.5818582763671875, 0.5815060424804688, 0.5813718872070313, 0.5813084106445312, 0.58233447265625, 0.58221875, 0.582023193359375, 1.2042802734375, 0.5814743041992188, 0.5829151000976562, 0.581739501953125, 0.582096923828125, 0.5818674926757812, 0.5823180541992188, 0.5825535888671876, 0.581813232421875, 0.5818327026367187, 0.5814989013671875, 0.5814200439453125, 0.58134326171875, 0.5823580322265625, 0.5821480712890625, 0.5814957885742188, 0.581401611328125, 0.58191357421875, 0.5817169799804688, 0.5821388549804688, 0.5816801147460937, 0.581734375, 0.58136474609375, 0.5820723266601563, 0.5818541870117188, 0.5822105712890625, 0.5819094848632812, 0.5814200439453125, 0.5814589233398437, 0.5813892822265625, 0.5821788330078125, 0.5815685424804687, 0.5816401977539063, 0.5814948120117187, 0.5815224609375, 0.58134326171875, 0.5816104736328125, 0.5815562133789063, 0.5814251708984375, 0.5814835205078125, 0.5819678955078125, 0.5815869140625, 0.5825003662109375, 0.5818951416015625, 0.5816432495117188, 0.5817241821289062, 0.5819412231445312, 0.5827870483398437, 0.5818347778320313, 0.5818951416015625, 0.5817241821289062, 0.5815101318359375, 0.5815992431640625, 0.5828341674804688, 0.58174462890625, 0.5817559204101562, 0.5814732666015625, 0.5818153076171875, 0.5818726196289062, 0.5815941162109375, 0.5818357543945313, 0.5814814453125, 0.581423095703125, 1.20260302734375, 0.5818193969726563, 0.581865478515625, 0.581950439453125, 0.5817681884765625, 0.5818214111328125, 0.5814395141601563, 0.5819514770507812, 0.5817221069335937, 0.5816934204101563, 0.5817293090820312, 0.581970947265625, 0.5828628540039062, 0.5817088012695313, 0.5820282592773437, 0.5821061401367188, 0.5815398559570313, 0.5825218505859375, 0.5821337890625, 0.5817528076171875, 0.5816156005859375, 0.5817528076171875, 0.5815244750976563, 0.5816012573242187, 0.5817876586914063, 0.5815234375, 0.5816299438476562, 
0.5816361083984375, 0.58243994140625, 0.5818275756835938, 0.581982177734375, 0.5820805053710938, 0.5815951538085937, 0.5817456665039062, 0.5822423095703125, 0.5822945556640625, 0.5821808471679687, 0.5818050537109375, 0.58201806640625, 0.5817753295898438, 0.5820897216796875, 0.5823733520507812, 0.5820651245117188, 0.5818265380859375, 0.581950439453125, 0.5819739990234375, 0.5821327514648438, 0.5821265869140625, 0.5822382202148437, 0.5820354614257812, 0.5818521728515625, 0.582096923828125, 0.5824255981445312, 0.5819852905273437, 0.58187060546875, 0.581559326171875, 0.5813483276367187, 0.5814466552734375, 0.5824675903320312, 0.5822116088867187, 0.582044677734375, 0.5817651977539062, 0.5824183959960938, 1.2028917236328125, 0.5816196899414062, 0.5820651245117188, 0.58216650390625, 0.5816453247070312, 0.5824542846679688, 0.58161767578125, 0.5813831787109375, 0.5814927368164062, 0.5813555297851563, 0.5818265380859375, 0.5822269287109375, 0.5816893310546875, 0.5816524658203125, 0.5816483764648438, 0.582150146484375, 0.5817559204101562, 0.5817753295898438, 0.5819832153320312, 0.58170263671875, 0.5822689208984375, 0.58190234375, 0.5830420532226562, 0.5826693115234375, 0.5818050537109375, 0.5818674926757812, 0.5821439819335937, 0.5822811889648437, 0.582197265625, 0.5828679809570313, 0.5820733642578125, 0.5823170776367188, 0.582361083984375, 0.5818163452148437, 0.5824215087890625, 0.5816258544921875, 0.5819381713867188, 0.5818787841796875, 0.5817630615234375, 0.5824501953125, 0.5823272705078125, 0.5820313720703125, 0.5818060913085937, 0.5822310180664062, 0.58191259765625, 0.5820538940429687, 0.5824081420898437, 0.5817354125976563, 0.5822463989257812, 0.5826129760742188, 0.5819381713867188, 0.5827451171875, 0.58174462890625, 0.5820057373046875, 0.5819617309570313, 0.581875732421875, 0.5822545776367187, 0.5817784423828125, 0.5824000244140625, 0.5813032836914063, 0.5822054443359375, 0.5817518310546875, 0.5821992797851563]",tokens/s,1.6921828767769354,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-16B-nl,Salesforce/codegen-16B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1736.183808,12352.749568,0.0,11706.302464,11035.465216,s,10,13.024343627929689,1.3024343627929689,0.002338564901261307,1.30185107421875,1.3038882202148436,1.3064520446777343,1.3085031042480468,"[1.309015869140625, 1.3033184814453125, 1.3003494873046875, 1.3011973876953125, 1.3011253662109374, 1.300955810546875, 1.301638427734375, 1.302063720703125, 1.3022261962890624, 1.302452880859375]",tokens/s,196.55501061184225,kWh,1.5360654360718197e-05,8.415124671664671e-06,7.215477994602094e-05,9.593055897840379e-05,tokens/kWh,2668596.9802139024,MB,1736.183808,12352.749568,0.0,11706.302464,11329.172992,s,10,763.3166562499999,76.331665625,0.01404827179512581,76.33404687500001,76.34705,76.34801328125,76.34878390625,"[76.3116171875, 76.3329609375, 76.3489765625, 76.3411171875, 76.3468359375, 76.331546875, 76.3441953125, 76.3351328125, 76.31521875, 
76.3090546875]",tokens/s,0.8253455428249735,kWh,0.0009011750282347202,0.0004939259724664952,0.0042084087556131695,0.005603509756314386,tokens/kWh,11242.953566558468,,s,629,773.821041748047,1.2302401299650985,0.15462762199250651,1.21159375,1.2122087890625,1.2124819091796875,2.513270859375,"[1.2114217529296876, 1.2109813232421875, 1.2112762451171875, 1.2113695068359376, 1.2109967041015626, 1.21078369140625, 1.210820556640625, 1.2114083251953125, 1.211473876953125, 1.2109915771484374, 1.2110264892578124, 1.211177001953125, 1.2111329345703126, 1.2108482666015625, 1.210894287109375, 1.2108328857421875, 1.2114595947265625, 1.21109912109375, 1.2110377197265625, 1.2114605712890625, 1.21109814453125, 1.2109107666015626, 1.2113837890625, 1.211439208984375, 1.2113089599609375, 1.2114852294921874, 1.2108953857421876, 1.211968505859375, 1.2109854736328125, 1.2114114990234375, 1.211503662109375, 1.2115701904296876, 1.2111954345703124, 1.212129150390625, 1.211504638671875, 1.2116378173828124, 1.21143701171875, 1.211441162109375, 1.2110633544921876, 1.21100390625, 1.210900390625, 1.2119766845703126, 1.21135302734375, 1.211661376953125, 1.21140625, 1.211376708984375, 1.21117578125, 1.211125732421875, 1.2112117919921874, 1.2114166259765624, 1.2111329345703126, 1.2116644287109375, 1.2111021728515625, 1.211261962890625, 1.2113489990234374, 1.2112978515625, 1.211167724609375, 1.211284423828125, 1.2117083740234376, 1.2115958251953125, 1.2114329833984374, 1.2118804931640625, 2.514330810546875, 1.210652587890625, 1.2109178466796875, 1.21115234375, 1.210639404296875, 1.2111728515625, 1.2111964111328124, 1.2109495849609375, 1.210982421875, 1.210829833984375, 1.211577392578125, 1.2116376953125, 1.2123822021484374, 1.2119183349609375, 1.2111278076171874, 1.2120401611328124, 1.2116695556640624, 1.2114871826171876, 1.2117001953125, 1.2118865966796875, 1.211429931640625, 1.2113817138671874, 1.21173095703125, 1.2116612548828125, 1.211536376953125, 1.211513916015625, 1.2119930419921876, 1.2118671875, 1.2117279052734375, 1.211217041015625, 1.2116253662109375, 1.211358154296875, 1.211610107421875, 1.21132958984375, 1.2115968017578125, 1.2115753173828125, 1.212295166015625, 1.211431884765625, 1.211931640625, 1.21170947265625, 1.21185791015625, 1.2116807861328125, 1.211569091796875, 1.2115169677734374, 1.2117945556640626, 1.211475830078125, 1.2115762939453125, 1.2120863037109375, 1.211937744140625, 1.2119654541015625, 1.2116817626953125, 1.2118035888671874, 1.212337158203125, 1.2117073974609376, 1.2118385009765624, 1.2118773193359376, 1.21191015625, 1.2120863037109375, 1.21194091796875, 1.212105712890625, 1.2121497802734376, 1.2118917236328124, 1.211799560546875, 2.513431640625, 1.2114862060546876, 1.2116961669921875, 1.2118785400390626, 1.2121865234375, 1.2122286376953124, 1.2120013427734375, 1.2121025390625, 1.21226953125, 1.211442138671875, 1.2113643798828124, 1.2111483154296876, 1.2115557861328126, 1.2120247802734374, 1.2112076416015625, 1.2115435791015625, 1.2116663818359374, 1.2110797119140626, 1.2113746337890625, 1.2114166259765624, 1.2112281494140624, 1.2118333740234375, 1.211620361328125, 1.2116275634765625, 1.211684814453125, 1.2115281982421875, 1.212507080078125, 1.2123299560546874, 1.211989990234375, 1.2121068115234375, 1.212232666015625, 1.2115784912109375, 1.21185888671875, 1.211282470703125, 1.211768798828125, 1.2113284912109374, 1.2114544677734376, 1.2116142578125, 1.2119736328125, 1.21160400390625, 1.2118660888671875, 1.211783203125, 1.211763671875, 1.21185888671875, 1.211821044921875, 1.211894775390625, 
1.212494873046875, 1.2119080810546874, 1.2119388427734374, 1.2123709716796875, 1.211740234375, 1.2130374755859374, 1.212662841796875, 1.212389404296875, 1.21267919921875, 1.2124569091796875, 1.212505126953125, 1.212078125, 1.2119132080078125, 1.2119111328125, 1.2119111328125, 1.2122039794921875, 1.212859375, 2.513799072265625, 1.2114248046875, 1.211298828125, 1.2112598876953125, 1.2111002197265626, 1.211157470703125, 1.2119388427734374, 1.2117012939453125, 1.211177001953125, 1.21225927734375, 1.2121456298828126, 1.212099609375, 1.2122501220703126, 1.212095458984375, 1.2122705078125, 1.212464111328125, 1.21236474609375, 1.21200439453125, 1.211461669921875, 1.21135107421875, 1.2123013916015626, 1.2120933837890624, 1.2120391845703125, 1.2125921630859375, 1.2116397705078126, 1.21157421875, 1.2111861572265625, 1.21097216796875, 1.2113848876953126, 1.2114390869140625, 1.21153125, 1.211658203125, 1.2115947265625, 1.2115711669921876, 1.2116275634765625, 1.211242431640625, 1.21172998046875, 1.2113121337890624, 1.2114923095703125, 1.211609130859375, 1.2118292236328125, 1.2115927734375, 1.21164794921875, 1.2114503173828124, 1.2116920166015626, 1.2116182861328124, 1.2114801025390625, 1.211564208984375, 1.21261865234375, 1.2118896484375, 1.211937744140625, 1.2124937744140625, 1.2117872314453124, 1.2115753173828125, 1.2119429931640624, 1.211613037109375, 1.212316650390625, 1.211663330078125, 1.2121661376953126, 1.2119859619140625, 1.2116695556640624, 1.2118455810546875, 1.2117862548828124, 2.5132001953125, 1.21116162109375, 1.2113070068359375, 1.2113653564453124, 1.211356201171875, 1.2111585693359375, 1.21143505859375, 1.211683837890625, 1.2116121826171875, 1.2118189697265624, 1.2116448974609375, 1.21178515625, 1.2120279541015626, 1.21154052734375, 1.211658203125, 1.211451416015625, 1.21213134765625, 1.2125296630859375, 1.2122440185546874, 1.212705810546875, 1.2123677978515626, 1.2122071533203125, 1.2123084716796875, 1.2123751220703125, 1.2121220703125, 1.2126556396484376, 1.2121026611328125, 1.21187841796875, 1.212506103515625, 1.21231982421875, 1.212422119140625, 1.2116009521484374, 1.2117115478515625, 1.2120238037109374, 1.2117667236328125, 1.2115660400390624, 1.2122071533203125, 1.211494384765625, 1.211684814453125, 1.21158251953125, 1.211788330078125, 1.2116644287109375, 1.211957275390625, 1.2114073486328125, 1.211895751953125, 1.2115875244140626, 1.2115179443359374, 1.211845703125, 1.2115126953125, 1.2115343017578124, 1.2121968994140624, 1.2115262451171875, 1.211950927734375, 1.2120555419921875, 1.211916259765625, 1.2119879150390624, 1.2116162109375, 1.2115435791015625, 1.212336181640625, 1.2118609619140626, 1.21176171875, 1.211826171875, 1.2119696044921875, 2.513016845703125, 1.2115302734375, 1.21221533203125, 1.211273193359375, 1.211552734375, 1.21154248046875, 1.211378662109375, 1.2118077392578126, 1.2109425048828124, 1.2108125, 1.211199462890625, 1.2112393798828125, 1.2109864501953125, 1.2110008544921875, 1.2113499755859376, 1.21132958984375, 1.211335693359375, 1.2113797607421875, 1.211218994140625, 1.21196240234375, 1.211410400390625, 1.2120770263671874, 1.2116243896484375, 1.2119141845703125, 1.2118671875, 1.211895751953125, 1.2114534912109376, 1.2118670654296875, 1.211398193359375, 1.2112353515625, 1.21159375, 1.2112230224609375, 1.211525146484375, 1.211252685546875, 1.2116796875, 1.21173095703125, 1.2115252685546876, 1.21129150390625, 1.211832275390625, 1.211515869140625, 1.21154052734375, 1.21152001953125, 1.2113541259765626, 1.211916259765625, 1.211999267578125, 1.211557861328125, 
1.2116612548828125, 1.2116644287109375, 1.211683837890625, 1.2118814697265625, 1.2116859130859374, 1.21154248046875, 1.212590087890625, 1.211631591796875, 1.21187841796875, 1.2120863037109375, 1.211619384765625, 1.2120166015625, 1.2119869384765625, 1.2120177001953125, 1.2121640625, 1.211704345703125, 1.211842529296875, 2.51329833984375, 1.2113704833984376, 1.2111278076171874, 1.21133056640625, 1.2119141845703125, 1.2111728515625, 1.2112230224609375, 1.2113807373046874, 1.2113182373046876, 1.211325439453125, 1.2114288330078125, 1.211557861328125, 1.211810791015625, 1.2117626953125, 1.21128759765625, 1.2117484130859375, 1.21141650390625, 1.2113756103515625, 1.2116080322265625, 1.211427978515625, 1.2113477783203126, 1.2119552001953124, 1.211189208984375, 1.2114248046875, 1.211610107421875, 1.21135205078125, 1.2119481201171876, 1.2117523193359374, 1.211484130859375, 1.2121180419921875, 1.21152099609375, 1.2116326904296875, 1.211673583984375, 1.2113121337890624, 1.2119869384765625, 1.2122685546875, 1.2123853759765626, 1.2128603515625, 1.2124027099609376, 1.2125235595703125, 1.2127314453125, 1.2127191162109374, 1.212885986328125, 1.2127242431640626, 1.2118333740234375, 1.211953125, 1.2121129150390626, 1.2117801513671875, 1.212015625, 1.211763671875, 1.2117012939453125, 1.2120667724609375, 1.21187939453125, 1.2117197265625, 1.2119981689453125, 1.21179443359375, 1.2120340576171875, 1.212147705078125, 1.21187841796875, 1.2120791015625, 1.21209033203125, 1.211905029296875, 1.2124200439453126, 2.514125732421875, 1.2114227294921875, 1.211292724609375, 1.211658203125, 1.2115589599609375, 1.2113983154296875, 1.2119888916015624, 1.2113704833984376, 1.211610107421875, 1.211758544921875, 1.2114852294921874, 1.21145654296875, 1.211821044921875, 1.2112169189453126, 1.2116602783203125, 1.211989990234375, 1.2113223876953125, 1.2115517578125, 1.2113489990234374, 1.2112762451171875, 1.2118035888671874, 1.211737060546875, 1.2115977783203125, 1.2129249267578126, 1.21261767578125, 1.2122420654296875, 1.211623291015625, 1.2114923095703125, 1.212148681640625, 1.2119019775390625, 1.21175146484375, 1.2118385009765624, 1.2116920166015626, 1.2118294677734376, 1.21165185546875, 1.2111922607421874, 1.2113212890625, 1.2114442138671875, 1.211694091796875, 1.21236474609375, 1.212018798828125, 1.211768798828125, 1.211747314453125, 1.2114288330078125, 1.2113223876953125, 1.2116746826171876, 1.21150048828125, 1.21159375, 1.21177294921875, 1.21147802734375, 1.212080078125, 1.21192138671875, 1.2113746337890625, 1.2114554443359375, 1.2115548095703126, 1.21162646484375, 1.211575439453125, 1.2113038330078125, 1.211806640625, 1.2116695556640624, 1.211400146484375, 1.2115343017578124, 1.2114677734375, 2.5139150390625, 1.2104947509765625, 1.2104796142578125, 1.2110601806640624, 1.210818603515625, 1.21080419921875, 1.2113817138671874, 1.2107110595703126, 1.2115599365234375, 1.210962890625, 1.21103466796875, 1.2109844970703125, 1.2109957275390626, 1.210639404296875, 1.210967041015625, 1.210735595703125, 1.211298828125, 1.2114892578125, 1.210882080078125, 1.2117001953125, 1.211292724609375, 1.2112998046875, 1.211736083984375, 1.2114698486328126, 1.2116868896484374, 1.212020751953125, 1.2116572265625, 1.2118538818359375, 1.211252685546875, 1.2111072998046875, 1.211619384765625, 1.2111688232421876, 1.211368408203125, 1.2118763427734376, 1.2112978515625, 1.211556884765625, 1.211440185546875, 1.2111011962890625, 1.21163671875, 1.2113223876953125, 1.2111298828125, 1.2116162109375, 1.2110653076171876, 1.2111217041015625, 1.211494384765625, 
1.211451416015625, 1.2114759521484375, 1.21181689453125, 1.2115538330078126, 1.2116080322265625, 1.211831298828125, 1.211515869140625, 1.21173193359375, 1.211282470703125, 1.2113018798828126, 1.21145654296875, 1.2112281494140624, 1.2112547607421875, 1.2116612548828125, 1.21142578125, 1.21210986328125, 1.2116920166015626, 1.2119910888671874, 2.51358935546875, 1.21063427734375, 1.211430908203125, 1.210544189453125, 1.2111871337890625, 1.21103564453125, 1.2112589111328125, 1.2109700927734375, 1.2114503173828124, 1.2111072998046875, 1.21147802734375, 1.210945556640625, 1.2111922607421874, 1.211684814453125, 1.2114554443359375, 1.2112486572265626, 1.2119346923828125, 1.2112281494140624, 1.2114483642578124, 1.2110079345703124, 1.2108524169921875, 1.2109935302734376, 1.2109833984375, 1.2113858642578126, 1.2111492919921876, 1.210892333984375, 1.2111124267578126, 1.2112025146484375, 1.2109700927734375, 1.211509765625, 1.21159375, 1.2114063720703125, 1.211040771484375, 1.21109814453125, 1.210799072265625, 1.210892333984375, 1.210966064453125, 1.210841064453125, 1.2113079833984375, 1.21124560546875, 1.2112608642578124, 1.2112496337890626, 1.2110306396484376, 1.2115035400390626, 1.211241455078125, 1.2117279052734375, 1.2114945068359375, 1.2111419677734374, 1.2113212890625, 1.211431884765625, 1.2111728515625, 1.2113961181640625, 1.211167724609375, 1.210883056640625, 1.21160498046875, 1.211494384765625, 1.2115814208984375, 1.2117698974609374, 1.2115025634765626, 1.2116275634765625, 1.2118814697265625, 1.211230224609375, 1.21170947265625]",tokens/s,0.8128494394247809,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-2.7b,EleutherAI/pythia-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2265.919488,3330.801664,0.0,2684.35456,2447.59552,s,10,2.4030111236572265,0.24030111236572266,0.0012440053970201484,0.24010930633544922,0.24119976043701172,0.24230707931518555,0.24319293441772463,"[0.2434143981933594, 0.2409536895751953, 0.23907139587402343, 0.23946762084960938, 0.2396122589111328, 0.23890797424316407, 0.23970700073242188, 0.2405301055908203, 0.24051161193847656, 0.24083506774902344]",tokens/s,1065.330066430923,kWh,2.8237744456245787e-06,1.5472998550649208e-06,1.2583984934904673e-05,1.6955059235594172e-05,tokens/kWh,15098738.166751605,MB,2265.919488,3330.801664,0.0,2684.35456,2597.681664,s,10,139.56764257812497,13.9567642578125,0.0037662168729373563,13.95505322265625,13.962588671875,13.96323818359375,13.963757792968751,"[13.95483984375, 13.95481640625, 13.9549306640625, 13.95517578125, 13.9599765625, 13.9638876953125, 13.9624443359375, 13.953060546875, 13.952154296875, 13.9563564453125]",tokens/s,4.513940254076789,kWh,0.00016469605360239274,9.026671937368518e-05,0.0007221266028330957,0.0009770893758091737,tokens/kWh,64477.21320050864,,s,629,141.50236668396,0.22496401698562793,0.028578568615475336,0.22146662902832032,0.22197882690429688,0.222200830078125,0.46145660278320316,"[0.222501953125, 0.22133958435058593, 0.22126080322265626, 0.22115533447265626, 
0.22122291564941407, 0.22115737915039063, 0.22111334228515625, 0.2211881561279297, 0.22123001098632813, 0.22108671569824218, 0.22202674865722657, 0.2213621826171875, 0.22130892944335936, 0.221412353515625, 0.22178816223144532, 0.22125669860839844, 0.22154550170898438, 0.22125155639648436, 0.22184141540527344, 0.22194586181640624, 0.22131817626953126, 0.22113481140136718, 0.22139187622070314, 0.22203596496582031, 0.22120550537109376, 0.22182707214355468, 0.22148197937011718, 0.22121574401855468, 0.22116249084472656, 0.22113999938964843, 0.22153315734863283, 0.22137344360351563, 0.22128536987304687, 0.22147686767578126, 0.22139497375488282, 0.2215167694091797, 0.22115020751953124, 0.22119526672363282, 0.2216222686767578, 0.22131507873535156, 0.2215731201171875, 0.22197042846679688, 0.2214871063232422, 0.22137344360351563, 0.22149530029296874, 0.2211829833984375, 0.22195916748046876, 0.22144102478027344, 0.22115122985839844, 0.22134988403320313, 0.2216417236328125, 0.22153318786621093, 0.2213396453857422, 0.22140824890136718, 0.2213949432373047, 0.22160794067382814, 0.22140109252929688, 0.22149017333984375, 0.22158233642578126, 0.2214686737060547, 0.2215290832519531, 0.2215116729736328, 0.4632596435546875, 0.22171238708496094, 0.221233154296875, 0.22143283081054688, 0.2215679931640625, 0.22207183837890626, 0.22199087524414063, 0.2218137664794922, 0.22159461975097655, 0.2211604766845703, 0.22112969970703125, 0.22134375, 0.22146156311035156, 0.2211706237792969, 0.2218250274658203, 0.2215004119873047, 0.2212454376220703, 0.22122291564941407, 0.22121881103515625, 0.22129049682617188, 0.22144000244140624, 0.22122496032714845, 0.22121574401855468, 0.22197247314453125, 0.2214799346923828, 0.22136012268066407, 0.22134066772460936, 0.22136422729492186, 0.22131097412109374, 0.22127923583984374, 0.22128128051757812, 0.22142156982421876, 0.22127308654785155, 0.22122598266601562, 0.22121676635742188, 0.22201344299316406, 0.2224384002685547, 0.22226841735839845, 0.22168576049804686, 0.22131507873535156, 0.2211666259765625, 0.22145021057128905, 0.2213509063720703, 0.2210908203125, 0.2211420135498047, 0.2215116729736328, 0.22274458312988282, 0.22148300170898438, 0.2212454376220703, 0.22115327453613282, 0.22183013916015626, 0.22141746520996095, 0.22183628845214845, 0.2215557098388672, 0.22127104187011717, 0.2220482635498047, 0.2213693389892578, 0.22153421020507813, 0.22129356384277343, 0.22213938903808594, 0.22136114501953125, 0.22106112670898437, 0.22137957763671876, 0.4608409729003906, 0.22114405822753908, 0.2210918426513672, 0.22119833374023437, 0.22196018981933593, 0.2215413818359375, 0.22137139892578125, 0.2214246368408203, 0.22122802734375, 0.22138983154296876, 0.2213478698730469, 0.2211327667236328, 0.22113591003417968, 0.2211645050048828, 0.22161613464355467, 0.22142771911621092, 0.22118502807617188, 0.22131715393066406, 0.22171746826171876, 0.22127410888671875, 0.22118911743164063, 0.22119740295410156, 0.22118185424804687, 0.22219775390625, 0.22114816284179686, 0.22126797485351563, 0.22138265991210937, 0.22202163696289062, 0.221623291015625, 0.22125567626953124, 0.2214297637939453, 0.22170930480957032, 0.22169293212890626, 0.22206874084472655, 0.22159257507324218, 0.2214799346923828, 0.22199090576171876, 0.22157720947265624, 0.22174208068847656, 0.22161613464355467, 0.2212843475341797, 0.22151271057128907, 0.22158233642578126, 0.22143283081054688, 0.22131404113769532, 0.22166323852539063, 0.22163046264648437, 0.22240870666503906, 0.22161407470703126, 0.22181785583496094, 0.22200831604003907, 
0.22142874145507813, 0.22144102478027344, 0.22163661193847656, 0.22149530029296874, 0.22142668151855469, 0.22134169006347656, 0.2213683166503906, 0.2215076141357422, 0.2218208923339844, 0.221412353515625, 0.2214072265625, 0.22158233642578126, 0.46169601440429686, 0.2212136993408203, 0.22105702209472655, 0.2211266632080078, 0.2211031036376953, 0.22123622131347656, 0.22111846923828124, 0.22110105895996093, 0.22219468688964844, 0.22128536987304687, 0.2216058807373047, 0.22124237060546875, 0.22226022338867188, 0.22136323547363282, 0.2218741455078125, 0.22145741271972658, 0.22111538696289063, 0.22111231994628905, 0.22129766845703125, 0.22105702209472655, 0.22113792419433595, 0.22142874145507813, 0.2212351989746094, 0.22122496032714845, 0.22117068481445312, 0.22113279724121093, 0.22115122985839844, 0.22112973022460938, 0.2212024383544922, 0.2211102752685547, 0.2212956085205078, 0.22164582824707033, 0.2216058807373047, 0.22137548828125, 0.22140524291992186, 0.22150752258300782, 0.22146560668945312, 0.22119526672363282, 0.22161715698242188, 0.22124032592773438, 0.22137651062011718, 0.221770751953125, 0.22136524963378906, 0.22113690185546875, 0.22289407348632811, 0.22195814514160156, 0.22175027465820313, 0.22173286437988282, 0.2216048583984375, 0.221559814453125, 0.22163661193847656, 0.22160383605957032, 0.22145741271972658, 0.22292991638183593, 0.22169804382324218, 0.22141644287109374, 0.22166732788085938, 0.22217625427246093, 0.2222335968017578, 0.22209843444824218, 0.22188134765625, 0.2213939208984375, 0.22153727722167968, 0.46185061645507813, 0.2211788787841797, 0.22111436462402342, 0.22129049682617188, 0.22150553894042968, 0.22128128051757812, 0.2212024383544922, 0.22144000244140624, 0.22146969604492187, 0.22136114501953125, 0.22112562561035157, 0.2214481964111328, 0.2222335968017578, 0.22209536743164063, 0.22137548828125, 0.2215034942626953, 0.22125567626953124, 0.22210150146484375, 0.22135398864746095, 0.22115122985839844, 0.22146560668945312, 0.22135501098632812, 0.2211973114013672, 0.2217144317626953, 0.22152809143066407, 0.22151266479492188, 0.2218260498046875, 0.22138777160644532, 0.22115327453613282, 0.22133042907714845, 0.2219438018798828, 0.22181068420410155, 0.22151679992675782, 0.22163967895507813, 0.22198374938964843, 0.222202880859375, 0.2216089630126953, 0.22158848571777343, 0.22197964477539062, 0.2219304962158203, 0.2216058807373047, 0.22188236999511718, 0.2217144317626953, 0.2216990966796875, 0.22185162353515625, 0.22161509704589843, 0.22144825744628907, 0.2215761260986328, 0.22152294921875, 0.22147789001464843, 0.22122189331054687, 0.22149017333984375, 0.22142054748535156, 0.2215905303955078, 0.2212833251953125, 0.22145535278320314, 0.2214256591796875, 0.22370611572265625, 0.22159666442871093, 0.22157209777832032, 0.22164889526367187, 0.22127622985839843, 0.2215331268310547, 0.4607232055664062, 0.2214246368408203, 0.22150758361816406, 0.22157212829589842, 0.22139695739746093, 0.22143283081054688, 0.22193766784667968, 0.22149017333984375, 0.22146456909179688, 0.22140824890136718, 0.22149533081054687, 0.22149014282226562, 0.22156288146972655, 0.22154342651367187, 0.22170623779296875, 0.2220062713623047, 0.22157107543945312, 0.22163865661621093, 0.22165196228027345, 0.22187519836425781, 0.22198477172851563, 0.22197862243652344, 0.22181272888183592, 0.22163456726074218, 0.22144921875, 0.2217902069091797, 0.22170623779296875, 0.22131404113769532, 0.22122291564941407, 0.22189158630371095, 0.22152499389648436, 0.2215362548828125, 0.22123417663574219, 0.2213365783691406, 
0.2216407012939453, 0.22166015625, 0.22149221801757812, 0.22126182556152343, 0.22392626953125, 0.2219438018798828, 0.22167347717285157, 0.22165606689453124, 0.22170930480957032, 0.22157626342773437, 0.2213846435546875, 0.22152499389648436, 0.2214307861328125, 0.2218014678955078, 0.22188134765625, 0.22133042907714845, 0.22165298461914062, 0.22161407470703126, 0.22176870727539064, 0.22166835021972656, 0.22149017333984375, 0.22150656127929688, 0.22176768493652343, 0.22180863952636717, 0.22148101806640624, 0.221297607421875, 0.22151373291015625, 0.222308349609375, 0.22171034240722656, 0.46220184326171876, 0.22253363037109375, 0.22171034240722656, 0.22214041137695312, 0.22114816284179686, 0.2212351989746094, 0.22153932189941405, 0.22129664611816408, 0.2211778564453125, 0.22154853820800782, 0.22134176635742187, 0.22164781188964844, 0.22147378540039062, 0.22127615356445313, 0.2212843475341797, 0.22165913391113282, 0.2215885467529297, 0.22137234497070313, 0.22157516479492187, 0.22207180786132813, 0.22170623779296875, 0.22145126342773438, 0.22118502807617188, 0.22266777038574218, 0.22152601623535156, 0.2213744659423828, 0.22128947448730468, 0.22164991760253908, 0.2218076171875, 0.22161920166015625, 0.22146969604492187, 0.22127513122558592, 0.22119424438476562, 0.22168269348144531, 0.22124441528320313, 0.22097100830078126, 0.221085693359375, 0.22113381958007813, 0.22170008850097656, 0.22122700500488282, 0.22127923583984374, 0.22145330810546876, 0.22255923461914062, 0.22140007019042968, 0.22144613647460937, 0.22161509704589843, 0.222023681640625, 0.22214041137695312, 0.22231552124023438, 0.2217574462890625, 0.22175949096679687, 0.22195610046386718, 0.22176666259765626, 0.221739013671875, 0.22195001220703126, 0.22145529174804687, 0.2223953857421875, 0.22163661193847656, 0.22160281372070312, 0.22152294921875, 0.22183322143554687, 0.2217840576171875, 0.2217410583496094, 0.46331597900390625, 0.22162431335449218, 0.22123930358886718, 0.2212843475341797, 0.2211409912109375, 0.22146662902832032, 0.22136729431152344, 0.22115020751953124, 0.22153216552734376, 0.22155059814453126, 0.22213427734375, 0.2212034606933594, 0.221412353515625, 0.22128640747070313, 0.22130995178222657, 0.22144102478027344, 0.22194688415527344, 0.22158131408691406, 0.22149632263183594, 0.22149427795410156, 0.2216222686767578, 0.22129664611816408, 0.22110617065429689, 0.22211993408203126, 0.2216058807373047, 0.22146456909179688, 0.22136012268066407, 0.22197760009765624, 0.2215905303955078, 0.22179327392578124, 0.2214993896484375, 0.22128230285644532, 0.2213017578125, 0.22127410888671875, 0.22146456909179688, 0.22121778869628905, 0.22168576049804686, 0.22120037841796875, 0.22118406677246094, 0.22135084533691407, 0.22131301879882812, 0.22129869079589845, 0.22116659545898437, 0.2210508728027344, 0.2215045166015625, 0.22192332458496095, 0.2215854034423828, 0.22149325561523436, 0.22160076904296874, 0.2215669708251953, 0.22166323852539063, 0.22128536987304687, 0.22132333374023438, 0.22139077758789064, 0.22124549865722656, 0.22128941345214845, 0.221665283203125, 0.22161613464355467, 0.22151577758789062, 0.22136524963378906, 0.2216407012939453, 0.22126693725585939, 0.22150860595703126, 0.4624425048828125, 0.22111744689941407, 0.22104473876953126, 0.2211829833984375, 0.22187826538085936, 0.22165196228027345, 0.2212290496826172, 0.22156083679199218, 0.22200729370117187, 0.22158950805664063, 0.22172671508789063, 0.221380615234375, 0.221306884765625, 0.22107449340820312, 0.22128428649902343, 0.22132121276855468, 0.221306884765625, 
0.22197042846679688, 0.22143589782714843, 0.2213027801513672, 0.22142771911621092, 0.22136524963378906, 0.22115225219726561, 0.22165811157226561, 0.22134988403320313, 0.22114303588867187, 0.22162124633789063, 0.22177484130859376, 0.22148812866210937, 0.2213570556640625, 0.22117170715332032, 0.22172569274902343, 0.221444091796875, 0.22128640747070313, 0.22114816284179686, 0.22123423767089845, 0.22163040161132813, 0.22124134826660155, 0.22113900756835939, 0.22141433715820313, 0.22133351135253906, 0.22119833374023437, 0.22116659545898437, 0.2211727294921875, 0.2214256591796875, 0.22142771911621092, 0.22107034301757814, 0.2211973114013672, 0.22134579467773438, 0.22134988403320313, 0.2222950439453125, 0.22165298461914062, 0.22142361450195314, 0.22213938903808594, 0.22171749877929686, 0.2223206329345703, 0.2216816711425781, 0.22124032592773438, 0.22122700500488282, 0.22170623779296875, 0.22179840087890626, 0.22177587890625, 0.2214297637939453, 0.4629862365722656, 0.22168269348144531, 0.22135910034179687, 0.22179840087890626, 0.22143487548828125, 0.2214686737060547, 0.22133042907714845, 0.22189260864257812, 0.22169088745117188, 0.22153421020507813, 0.22115327453613282, 0.22121165466308593, 0.22176153564453124, 0.221380615234375, 0.22127308654785155, 0.22127206420898438, 0.2213959655761719, 0.22139085388183594, 0.22160592651367186, 0.2212945556640625, 0.22128128051757812, 0.2216611785888672, 0.22154751586914062, 0.22151373291015625, 0.2215004119873047, 0.22142361450195314, 0.22282957458496094, 0.22140313720703125, 0.22110617065429689, 0.22118502807617188, 0.22146456909179688, 0.2219325408935547, 0.22170828247070312, 0.22170008850097656, 0.22203187561035156, 0.22157005310058595, 0.22241897583007814, 0.22136521911621093, 0.22132940673828125, 0.22154444885253907, 0.22126284790039064, 0.22154444885253907, 0.22126080322265626, 0.22116966247558595, 0.22158335876464844, 0.22131199645996094, 0.2215188446044922, 0.22126797485351563, 0.22128128051757812, 0.22140518188476563, 0.22145126342773438, 0.221559814453125, 0.22204620361328126, 0.22181068420410155, 0.22146456909179688, 0.22160794067382814, 0.22161715698242188, 0.22150553894042968, 0.22128640747070313, 0.22156185913085938, 0.22152806091308594, 0.22130995178222657, 0.2214297637939453]",tokens/s,4.445155333725598,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,a,a,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status 
response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/a/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3078-541f83767dc4d99a5393f1b9;d953a4b8-80e2-431d-a3c0-9c4c3c9f2371) Repository Not Found for url: https://huggingface.co/a/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: a is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-2.7B,,cuda,0,42,,,,,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,main,False,False,MB,2236.862464,2932.342784,0.0,2285.89568,2082.706944,s,10,2.5091812438964842,0.25091812438964844,0.0016359490817563227,0.25024176025390626,0.25317278594970705,0.2535381980895996,0.25383052780151366,"[0.25309158325195313, 0.2539036102294922, 0.24987110900878906, 0.24888946533203124, 0.2497696075439453, 0.24914796447753906, 0.2504080047607422, 0.25179104614257813, 0.25223333740234377, 0.2500755157470703]",tokens/s,1020.253122897013,kWh,2.948670335578402e-06,1.6157431971317612e-06,1.3066561943753649e-05,1.763097547646381e-05,tokens/kWh,14519899.953450851,MB,2238.38208,2959.60576,0.0,2313.158656,2180.685312,s,10,143.8805810546875,14.38805810546875,0.011353783216855234,14.382828125,14.40145888671875,14.403668896484376,14.405436904296876,"[14.4009677734375, 14.40587890625, 14.39833203125, 14.3805361328125, 14.374423828125, 14.3830166015625, 14.400140625, 14.373748046875, 14.3808974609375, 
14.3826396484375]",tokens/s,4.378631191102458,kWh,0.000169606630340582,9.295822156634463e-05,0.0007504743544440634,0.0010130392063509902,tokens/kWh,62189.10344736671,,s,629,145.90565167236326,0.2319644700673502,0.029875164751919897,0.22824755859375,0.22927770080566406,0.22946876831054688,0.47820606079101563,"[0.22992076110839843, 0.22815129089355468, 0.22794752502441407, 0.22817485046386718, 0.2277969970703125, 0.22800997924804686, 0.22908108520507814, 0.22838169860839844, 0.2285506591796875, 0.2286049346923828, 0.22830694580078126, 0.22801516723632811, 0.22815020751953125, 0.22767308044433593, 0.22776422119140624, 0.2276741180419922, 0.22829362487792967, 0.22767205810546876, 0.22779493713378907, 0.2277058563232422, 0.22765670776367186, 0.22813594055175782, 0.22820147705078125, 0.2283335723876953, 0.2275809326171875, 0.22812364196777343, 0.2283008575439453, 0.22861715698242188, 0.2281564178466797, 0.22812979125976562, 0.2289090576171875, 0.2288476104736328, 0.22834585571289062, 0.22857522583007814, 0.2290155487060547, 0.22790553283691406, 0.22831922912597657, 0.22821784973144532, 0.22858444213867188, 0.2286612548828125, 0.22839808654785157, 0.22902169799804686, 0.22916812133789063, 0.22946917724609375, 0.22901350402832032, 0.22913536071777343, 0.22909747314453124, 0.229138427734375, 0.22936679077148436, 0.22892851257324218, 0.22936268615722658, 0.22932582092285156, 0.22920909118652344, 0.22904730224609374, 0.2291425323486328, 0.22898892211914063, 0.22877183532714843, 0.2286202850341797, 0.229064697265625, 0.22876876831054688, 0.2292131805419922, 0.22902476501464844, 0.48329931640625, 0.2282854461669922, 0.228822021484375, 0.2279741516113281, 0.22834687805175782, 0.22893157958984375, 0.22888038635253907, 0.22889677429199218, 0.22806629943847656, 0.22842678833007812, 0.22791778564453125, 0.22817791748046876, 0.22819737243652344, 0.228274169921875, 0.22817791748046876, 0.2278656005859375, 0.22779391479492186, 0.22778982543945311, 0.22769561767578125, 0.22855783081054687, 0.2280273895263672, 0.22766490173339843, 0.2277734375, 0.2279331817626953, 0.22785125732421874, 0.2278594512939453, 0.227852294921875, 0.2281492462158203, 0.2276505584716797, 0.22739762878417968, 0.22808883666992188, 0.22794650268554686, 0.22817485046386718, 0.228094970703125, 0.22861415100097657, 0.22928793334960937, 0.22927769470214843, 0.22896333312988282, 0.22915072631835937, 0.22908006286621094, 0.22935040283203126, 0.22938829040527345, 0.22915583801269532, 0.22902578735351561, 0.22928282165527344, 0.22933914184570312, 0.23262924194335938, 0.22968319702148438, 0.22875648498535156, 0.22789529418945312, 0.2278666229248047, 0.2295767059326172, 0.22941900634765625, 0.22929306030273439, 0.22928282165527344, 0.22947532653808594, 0.2292490234375, 0.22923365783691407, 0.22946815490722655, 0.22925106811523438, 0.22942207336425782, 0.22925315856933592, 0.2293155517578125, 0.4786954345703125, 0.22789324951171874, 0.22778778076171874, 0.22853427124023437, 0.2276505584716797, 0.22818611145019532, 0.22808677673339844, 0.22816160583496095, 0.22942201232910156, 0.22774887084960938, 0.22915277099609374, 0.2292162628173828, 0.22927772521972656, 0.22797821044921876, 0.2286940155029297, 0.22868377685546876, 0.22756658935546875, 0.22776832580566406, 0.22760652160644532, 0.22787379455566406, 0.2284400634765625, 0.22826905822753907, 0.22876876831054688, 0.22959616088867188, 0.22788198852539063, 0.22754815673828124, 0.2290493469238281, 0.22905958557128905, 0.22827314758300782, 0.22785842895507813, 0.22869094848632812, 0.2282854461669922, 
0.22885580444335937, 0.22893157958984375, 0.2289971160888672, 0.22875340270996095, 0.22852301025390626, 0.22836224365234375, 0.22938829040527345, 0.2279147491455078, 0.22865306091308593, 0.22844825744628905, 0.22864588928222657, 0.22837759399414062, 0.2290636749267578, 0.2289213409423828, 0.22904115295410157, 0.23109939575195312, 0.22880665588378907, 0.22881996154785156, 0.2284707794189453, 0.229232666015625, 0.22840829467773438, 0.2283520050048828, 0.23005081176757813, 0.22963821411132812, 0.22803654479980467, 0.22828851318359375, 0.22792909240722656, 0.22795878601074218, 0.22777548217773438, 0.22785331726074218, 0.22797004699707032, 0.47824075317382814, 0.22801408386230468, 0.22845132446289063, 0.22803660583496094, 0.22865408325195313, 0.22791679382324218, 0.22993101501464844, 0.22821376037597657, 0.22777548217773438, 0.2276874237060547, 0.22872781372070314, 0.22810418701171875, 0.22863462829589845, 0.2276290588378906, 0.22807244873046875, 0.2276546630859375, 0.22767514038085937, 0.22787481689453126, 0.22811648559570313, 0.22791270446777342, 0.22781336975097657, 0.22764236450195313, 0.2276822967529297, 0.22771916198730469, 0.22807347106933593, 0.22767718505859375, 0.22773248291015624, 0.2274396209716797, 0.2276177978515625, 0.2276126708984375, 0.22771405029296876, 0.2276884460449219, 0.22757273864746094, 0.22881689453125, 0.22877593994140624, 0.22816461181640624, 0.22797311401367187, 0.22859878540039064, 0.22825677490234375, 0.22843699645996093, 0.22875750732421876, 0.22935040283203126, 0.22820352172851563, 0.22832333374023436, 0.23120999145507812, 0.22912101745605468, 0.22907904052734376, 0.22970060729980468, 0.22873907470703125, 0.22823014831542968, 0.22959414672851564, 0.22853219604492186, 0.22784101867675782, 0.22805708312988282, 0.22899507141113282, 0.2288885803222656, 0.227852294921875, 0.2276669464111328, 0.2279219207763672, 0.22771916198730469, 0.22784921264648436, 0.2281492462158203, 0.22833561706542968, 0.47811685180664065, 0.22757478332519532, 0.2276433868408203, 0.227662841796875, 0.22771510314941407, 0.22770684814453124, 0.22767001342773438, 0.2279884796142578, 0.22770176696777344, 0.2278707275390625, 0.22784512329101564, 0.22757171630859374, 0.22767922973632812, 0.22853836059570312, 0.22817181396484376, 0.22842568969726562, 0.22801408386230468, 0.2303057861328125, 0.22843084716796874, 0.22846669006347656, 0.22829670715332032, 0.227631103515625, 0.22766796875, 0.22799462890625, 0.22806732177734376, 0.22791270446777342, 0.22781747436523436, 0.22777548217773438, 0.22781234741210937, 0.22849331665039063, 0.22844009399414061, 0.22815536499023437, 0.22805509948730468, 0.22854751586914063, 0.22936679077148436, 0.2276741180419922, 0.22772940063476563, 0.22759014892578125, 0.22804173278808593, 0.22774169921875, 0.22859373474121095, 0.22844819641113281, 0.22795266723632812, 0.22765052795410157, 0.22817893981933593, 0.22788096618652343, 0.2275768280029297, 0.227915771484375, 0.22789631652832032, 0.22774783325195314, 0.22783692932128907, 0.22787890625, 0.22841958618164063, 0.22780006408691406, 0.22799154663085938, 0.2284862060546875, 0.22839187622070312, 0.23194111633300782, 0.22911180114746094, 0.22951731872558595, 0.22850355529785157, 0.22849740600585938, 0.22812570190429687, 0.47752294921875, 0.2276556854248047, 0.22764544677734375, 0.22774169921875, 0.22828953552246095, 0.22794444274902342, 0.22778880310058594, 0.2285332489013672, 0.22908927917480468, 0.2287073211669922, 0.2291087341308594, 0.22870118713378906, 0.22815948486328125, 0.22893466186523437, 0.2281328582763672, 
0.22798233032226561, 0.22857215881347656, 0.22788812255859375, 0.2276177978515625, 0.22820658874511718, 0.22790963745117188, 0.2278912353515625, 0.22779286193847656, 0.22789631652832032, 0.2286622772216797, 0.2280079345703125, 0.22793728637695312, 0.22807142639160155, 0.22804888916015625, 0.2280335388183594, 0.22812159729003906, 0.2278830108642578, 0.2276884460449219, 0.22792807006835938, 0.22778675842285157, 0.22810009765625, 0.22793215942382813, 0.22936473083496095, 0.23167283630371094, 0.22877081298828125, 0.22863258361816408, 0.22877593994140624, 0.22783282470703126, 0.2277181396484375, 0.22791270446777342, 0.22783183288574219, 0.22780720520019532, 0.22802841186523437, 0.2279536590576172, 0.2279619140625, 0.22795872497558595, 0.22792909240722656, 0.22810009765625, 0.22938214111328126, 0.22828031921386718, 0.228242431640625, 0.2287636413574219, 0.22942311096191406, 0.22908114624023437, 0.22857618713378905, 0.22855372619628905, 0.22922035217285155, 0.2285117492675781, 0.4789893188476563, 0.22813900756835936, 0.22828134155273438, 0.22873292541503906, 0.2284707794189453, 0.22896640014648437, 0.22858956909179687, 0.2285096893310547, 0.22846669006347656, 0.22824858093261718, 0.2286878662109375, 0.2290083770751953, 0.22912716674804687, 0.22828851318359375, 0.22853836059570312, 0.228384765625, 0.22901145935058595, 0.2284523468017578, 0.22929306030273439, 0.22884352111816406, 0.2288916473388672, 0.22799874877929688, 0.22763005065917968, 0.2294476776123047, 0.22789324951171874, 0.22849740600585938, 0.2280437774658203, 0.22870835876464843, 0.228890625, 0.228600830078125, 0.22893466186523437, 0.22901248168945312, 0.22887936401367187, 0.22905445861816406, 0.2291025848388672, 0.22861824035644532, 0.2289040069580078, 0.22949267578125, 0.22833255004882813, 0.22794650268554686, 0.2277928924560547, 0.2285117492675781, 0.2278656005859375, 0.22826905822753907, 0.22805914306640626, 0.22932992553710937, 0.2291025848388672, 0.2288046112060547, 0.22843597412109376, 0.22823526000976563, 0.2282977294921875, 0.22911488342285155, 0.22893772888183594, 0.2283100128173828, 0.22832127380371095, 0.22796287536621093, 0.22796389770507813, 0.22786151123046874, 0.22802024841308594, 0.22792703247070312, 0.2290247344970703, 0.2290872344970703, 0.2293729248046875, 0.48112127685546874, 0.22774476623535156, 0.22767205810546876, 0.22760140991210936, 0.22770790100097657, 0.228706298828125, 0.22839602661132813, 0.22828953552246095, 0.22751437377929687, 0.22827008056640624, 0.22855577087402343, 0.22760858154296876, 0.22853733825683595, 0.2290882568359375, 0.22853427124023437, 0.22787174987792969, 0.22959922790527343, 0.22843597412109376, 0.22793830871582033, 0.22853427124023437, 0.22780621337890625, 0.22782566833496093, 0.2280755157470703, 0.22808781433105468, 0.22815948486328125, 0.22787686157226564, 0.22864691162109374, 0.2278481903076172, 0.22766387939453125, 0.22787174987792969, 0.22767514038085937, 0.22795161437988282, 0.22794650268554686, 0.22880563354492187, 0.22775808715820312, 0.22767514038085937, 0.22767718505859375, 0.2277734375, 0.22772122192382813, 0.22761984252929687, 0.2289653778076172, 0.22932582092285156, 0.2285506591796875, 0.22796083068847656, 0.2276986846923828, 0.22787583923339844, 0.2282608642578125, 0.2280263671875, 0.22794137573242187, 0.2278461456298828, 0.22779391479492186, 0.22773248291015624, 0.2276259765625, 0.22793215942382813, 0.22887014770507813, 0.2283756103515625, 0.22759724426269531, 0.22863462829589845, 0.22802841186523437, 0.228279296875, 0.22952755737304686, 0.22824755859375, 
0.2283223114013672, 0.4810905456542969, 0.22825372314453124, 0.22767202758789062, 0.22804582214355468, 0.22862745666503906, 0.227810302734375, 0.22774989318847655, 0.22795980834960938, 0.22772940063476563, 0.22866943359375, 0.22906982421875, 0.2284390411376953, 0.22901657104492187, 0.22875852966308594, 0.22855270385742188, 0.22873805236816405, 0.22831513977050782, 0.22964530944824219, 0.2282782745361328, 0.228890625, 0.22893772888183594, 0.22941389465332032, 0.22863871765136717, 0.22856192016601562, 0.2286868438720703, 0.22785536193847655, 0.22773554992675782, 0.22780723571777345, 0.22774887084960938, 0.22783795166015625, 0.2286028747558594, 0.2292316131591797, 0.22855474853515625, 0.2284277801513672, 0.22815129089355468, 0.22762086486816407, 0.22760345458984374, 0.2294599609375, 0.228251708984375, 0.22796998596191406, 0.22916300964355468, 0.22799667358398437, 0.22786355590820312, 0.2276986846923828, 0.22783282470703126, 0.22780621337890625, 0.22774681091308593, 0.22811955261230468, 0.22787890625, 0.22774374389648439, 0.2275000305175781, 0.22767001342773438, 0.2276884460449219, 0.22773554992675782, 0.2278778839111328, 0.22794650268554686, 0.22774476623535156, 0.22773452758789062, 0.22781234741210937, 0.22782975769042968, 0.22782054138183594, 0.22786151123046874, 0.22886604309082031, 0.4816783447265625, 0.22901145935058595, 0.22866943359375, 0.2290145263671875, 0.22938316345214843, 0.22859571838378906, 0.22785023498535156, 0.22841139221191406, 0.22864895629882812, 0.2287615966796875, 0.22899302673339844, 0.2279720916748047, 0.2287073211669922, 0.22850355529785157, 0.22853631591796875, 0.22866021728515626, 0.22911077880859376, 0.22943026733398436, 0.22803558349609376, 0.22808781433105468, 0.2292725830078125, 0.22914457702636717, 0.22839295959472655, 0.22797314453125, 0.22791267395019532, 0.22795161437988282, 0.22988902282714843, 0.2280447998046875, 0.22778163146972658, 0.2276444091796875, 0.2277928924560547, 0.2285015106201172, 0.22870835876464843, 0.22830181884765624, 0.22769766235351563, 0.22775193786621092, 0.22775091552734375, 0.22768333435058594, 0.22769664001464843, 0.22772633361816405, 0.22788607788085938, 0.22782054138183594, 0.22759837341308595, 0.22872572326660157, 0.2291568603515625, 0.22867762756347657, 0.228094970703125, 0.2280990753173828, 0.22783795166015625, 0.2277724151611328, 0.22763827514648438, 0.2278164520263672, 0.22774681091308593, 0.2276433868408203, 0.2277181396484375, 0.22805708312988282, 0.2277232666015625, 0.228068359375, 0.2278154296875, 0.2276864013671875, 0.22811546325683593, 0.22927462768554688, 0.2289459228515625]",tokens/s,4.311005041891343,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-40b,tiiuae/falcon-40b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = 
launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-40b/4a70170c215b36a3cce4b4253f6d0612bb7d4146/modeling_falcon.py"", line 900, in forward transformer_outputs = self.transformer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-40b/4a70170c215b36a3cce4b4253f6d0612bb7d4146/modeling_falcon.py"", line 797, in forward outputs = block( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-40b/4a70170c215b36a3cce4b4253f6d0612bb7d4146/modeling_falcon.py"", line 477, in forward mlp_output = self.mlp(mlp_layernorm_out) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-40b/4a70170c215b36a3cce4b4253f6d0612bb7d4146/modeling_falcon.py"", line 409, in forward x = self.act(self.dense_h_to_4h(x)) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemm.py"", line 242, in forward out = WQLinearMMFunction.apply( File ""/usr/local/lib/python3.10/dist-packages/torch/autograd/function.py"", line 598, in apply return super().apply(*args, **kwargs) # type: ignore[misc] File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemm.py"", line 50, in forward out = dequantize_gemm(qweight, qzeros, scales, w_bit, group_size) File ""/usr/local/lib/python3.10/dist-packages/awq/utils/packing_utils.py"", line 85, in dequantize_gemm iweight, izeros = unpack_awq(qweight, qzeros, bits) File ""/usr/local/lib/python3.10/dist-packages/awq/utils/packing_utils.py"", line 12, in unpack_awq iweights = torch.bitwise_right_shift(qweight[:, :, None], shifts[None, None, :]).to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 2.00 GiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,1,1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/1/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, 
timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3492-2921c8922b45bd3b18b878c7;eade41e6-8580-42ea-8f9b-04be12d11623) Repository Not Found for url: https://huggingface.co/1/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 1 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-base-alpha-3b,stabilityai/stablelm-base-alpha-3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 
7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.1,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1400.946688,4467.458048,0.0,3821.010944,3588.539904,s,10,2.8509368591308593,0.285093685913086,0.0018864014341163783,0.28445895385742187,0.2852971740722656,0.28801900939941405,0.29019647766113277,"[0.2907408447265625, 0.28459954833984374, 0.28446585083007814, 0.2842637329101563, 0.28442007446289064, 0.2843152465820312, 0.2845796203613281, 0.2844075622558594, 0.28469232177734377, 0.2844520568847656]",tokens/s,897.9504375205436,kWh,3.3620131512482964e-06,1.8420311789971794e-06,1.613754377422258e-05,2.134158810446806e-05,tokens/kWh,11995358.487234792,MB,1400.946688,4467.458048,0.0,3821.010944,3698.94656,s,10,165.60105078125,16.560105078124998,0.002304538991507171,16.5600693359375,16.5629611328125,16.56372666015625,16.56433908203125,"[16.5644921875, 16.559681640625, 16.560998046875, 16.557119140625, 16.55881640625, 16.558265625, 16.56045703125, 16.562791015625, 16.556978515625, 16.561451171875]",tokens/s,3.804323686521747,kWh,0.00019549797135922647,0.00010714875620388434,0.0009433736405007801,0.0012460203680638908,tokens/kWh,50560.97124470891,,s,629,167.89605535888654,0.2669253662303445,0.03378333259823164,0.26283621215820313,0.2630875061035156,0.263170263671875,0.5471718627929687,"[0.26277273559570313, 0.26256280517578123, 0.2627635192871094, 0.2625884094238281, 0.262614013671875, 0.26269287109375, 0.26282803344726563, 0.26283111572265627, 0.2626437072753906, 0.26271026611328124, 0.2626826171875, 0.2628269958496094, 0.2629119873046875, 0.2628935546875, 0.26298162841796874, 0.26274612426757815, 0.2627860412597656, 0.26264779663085935, 0.26285055541992186, 0.26276556396484374, 0.26288433837890623, 0.2632120361328125, 0.2632437744140625, 0.262935546875, 0.26322021484375, 0.2628751220703125, 0.26288433837890623, 0.2628751220703125, 0.2629795837402344, 0.26279833984375, 0.263130126953125, 0.2629437561035156, 0.263046142578125, 0.26281777954101565, 0.26273895263671876, 0.26270721435546873, 0.26306353759765627, 0.2627993469238281, 0.263014404296875, 0.26284442138671876, 0.26303692626953123, 0.26287103271484374, 0.26279220581054685, 0.26292633056640624, 0.2630000610351563, 0.2632509460449219, 0.2630911865234375, 0.26301644897460935, 0.2629447631835938, 0.2627860412597656, 0.26292837524414064, 0.26297344970703124, 0.2631321716308594, 0.26289767456054686, 0.26301132202148436, 0.2628464660644531, 0.26280755615234375, 0.2627205200195312, 0.2628055114746094, 0.26311065673828127, 0.26308914184570314, 0.26280755615234375, 0.5474641723632813, 0.2627502136230469, 0.262677490234375, 0.26272564697265627, 0.26257305908203127, 0.2627666015625, 0.26281777954101565, 0.26266726684570313, 0.2627686462402344, 0.26266009521484374, 0.2626662292480469, 0.26324069213867185, 0.2628741149902344, 0.26273382568359377, 0.26285772705078125, 0.2628853759765625, 0.26265701293945315, 0.2628894653320312, 0.2630328369140625, 0.26302566528320315, 0.26280856323242185, 0.2628055114746094, 0.2626734008789062, 0.2630440979003906, 0.2628382568359375, 0.26280038452148435, 0.2630901794433594, 0.26281573486328125, 0.2629386291503906, 0.2631526489257813, 0.2629437561035156, 0.2629140625, 0.26282803344726563, 0.2627686462402344, 0.26325503540039064, 0.26316903686523435, 0.26300314331054686, 0.26308096313476564, 0.263041015625, 0.26282290649414064, 0.26275430297851565, 0.26275942993164064, 0.2629099426269531, 0.26284542846679687, 0.26273995971679687, 
0.2628136901855469, 0.2626826171875, 0.26290689086914065, 0.26275225830078125, 0.2628515930175781, 0.26283416748046873, 0.2628751220703125, 0.2627901306152344, 0.26285772705078125, 0.26277273559570313, 0.26280142211914065, 0.26279833984375, 0.2628833312988281, 0.26294168090820313, 0.2629375915527344, 0.26277273559570313, 0.26275839233398435, 0.2627430419921875, 0.547251220703125, 0.2627635192871094, 0.26263143920898435, 0.2626652221679687, 0.2625669250488281, 0.2627276916503906, 0.2626590576171875, 0.26271026611328124, 0.2630768737792969, 0.26291915893554685, 0.26271435546875, 0.2629437561035156, 0.2627635192871094, 0.2625976257324219, 0.262697998046875, 0.2629939270019531, 0.2626723937988281, 0.2627799072265625, 0.26286285400390624, 0.26268978881835936, 0.2628239440917969, 0.2629273681640625, 0.2628853759765625, 0.2630993957519531, 0.2629744567871094, 0.26282290649414064, 0.26280856323242185, 0.2628874206542969, 0.26281573486328125, 0.2628106384277344, 0.26278707885742186, 0.2631178283691406, 0.2629519348144531, 0.26300827026367185, 0.2628822937011719, 0.262793212890625, 0.2627358703613281, 0.262898681640625, 0.2627635192871094, 0.2630543212890625, 0.26283621215820313, 0.2628751220703125, 0.2629508972167969, 0.2629242858886719, 0.26298675537109373, 0.26301336669921876, 0.2632284240722656, 0.26328472900390626, 0.2629119873046875, 0.2631649169921875, 0.26297854614257815, 0.26286181640625, 0.26307379150390625, 0.26298880004882813, 0.26273995971679687, 0.26296728515625, 0.26279730224609377, 0.2629386291503906, 0.26305535888671877, 0.262835205078125, 0.2627409973144531, 0.26296728515625, 0.26286181640625, 0.5470679321289063, 0.26265496826171875, 0.2626447448730469, 0.26254437255859375, 0.2625976257324219, 0.26270513916015625, 0.2626662292480469, 0.2626078796386719, 0.2626385803222656, 0.2627911682128906, 0.26279525756835936, 0.26274713134765626, 0.26267544555664063, 0.2626795654296875, 0.26265396118164064, 0.2627348327636719, 0.2626273193359375, 0.26269287109375, 0.26267648315429687, 0.26258740234375, 0.26294168090820313, 0.2628188171386719, 0.2626805725097656, 0.26300927734375, 0.26291915893554685, 0.2627901306152344, 0.2627624816894531, 0.2626580505371094, 0.26270925903320314, 0.26276556396484374, 0.2627819519042969, 0.26282803344726563, 0.26264779663085935, 0.26278707885742186, 0.26265087890625, 0.26304510498046874, 0.26298162841796874, 0.26296728515625, 0.2629273681640625, 0.26303079223632814, 0.26278091430664063, 0.262898681640625, 0.2630758361816406, 0.2631127014160156, 0.2629908447265625, 0.2628485107421875, 0.2629222412109375, 0.2627747802734375, 0.26271026611328124, 0.2629294128417969, 0.26290277099609377, 0.2629468078613281, 0.2629150695800781, 0.2629375915527344, 0.262803466796875, 0.2627768249511719, 0.26270721435546873, 0.26287307739257815, 0.2632929382324219, 0.2630379638671875, 0.2627747802734375, 0.26309222412109373, 0.26279730224609377, 0.5472327880859374, 0.2629273681640625, 0.2627839965820313, 0.26275225830078125, 0.26269183349609376, 0.26259249877929686, 0.26274508666992186, 0.26279425048828126, 0.2629375915527344, 0.2630973510742188, 0.26277069091796873, 0.2626744384765625, 0.26267544555664063, 0.2627768249511719, 0.2626908264160156, 0.2628167724609375, 0.2626355285644531, 0.2627747802734375, 0.2627993469238281, 0.26275942993164064, 0.2628392944335938, 0.26277069091796873, 0.2627286987304687, 0.26279730224609377, 0.2626713562011719, 0.26264984130859376, 0.2627358703613281, 0.26286590576171875, 0.26278091430664063, 0.2628464660644531, 0.2626447448730469, 0.2627850341796875, 
0.2631075744628906, 0.26286285400390624, 0.26297549438476564, 0.26297650146484375, 0.26297344970703124, 0.26278912353515627, 0.2626252746582031, 0.2630748291015625, 0.26306149291992187, 0.26298880004882813, 0.2631884765625, 0.26296218872070315, 0.2627358703613281, 0.26285055541992186, 0.26278912353515627, 0.26330316162109374, 0.26291302490234375, 0.26285055541992186, 0.2627799072265625, 0.2628055114746094, 0.2628536376953125, 0.2628884582519531, 0.26296319580078126, 0.26335232543945314, 0.262930419921875, 0.26275326538085936, 0.2627911682128906, 0.262835205078125, 0.26279833984375, 0.2627778625488281, 0.2626990051269531, 0.547040283203125, 0.2626211853027344, 0.2626027526855469, 0.26274713134765626, 0.26255462646484373, 0.26267034912109377, 0.2627778625488281, 0.2628802490234375, 0.26282907104492187, 0.2628853759765625, 0.26311578369140626, 0.2628106384277344, 0.262761474609375, 0.26274200439453127, 0.262645751953125, 0.2627491760253906, 0.2628413391113281, 0.2627215270996094, 0.262719482421875, 0.26273690795898436, 0.2629324951171875, 0.26294168090820313, 0.26289767456054686, 0.2629847106933594, 0.26286590576171875, 0.262866943359375, 0.2628167724609375, 0.2628802490234375, 0.262972412109375, 0.2628741149902344, 0.26290072631835937, 0.2630010986328125, 0.26286489868164065, 0.2628536376953125, 0.26311883544921877, 0.26306561279296875, 0.26278912353515627, 0.2628884582519531, 0.2627799072265625, 0.2629795837402344, 0.26278912353515627, 0.2628812866210937, 0.26278912353515627, 0.2628106384277344, 0.26293145751953123, 0.26276760864257814, 0.2627758178710938, 0.2628536376953125, 0.26280755615234375, 0.26282086181640624, 0.26271026611328124, 0.26280242919921876, 0.26279730224609377, 0.2627082214355469, 0.26268365478515626, 0.2629601135253906, 0.26269287109375, 0.2627307434082031, 0.26279525756835936, 0.26303079223632814, 0.26300314331054686, 0.262866943359375, 0.2628792419433594, 0.5475604248046875, 0.2626938781738281, 0.262719482421875, 0.26291915893554685, 0.26270309448242185, 0.262719482421875, 0.26266827392578124, 0.2629447631835938, 0.26271743774414064, 0.2627205200195312, 0.262645751953125, 0.26268161010742186, 0.26261196899414063, 0.2626805725097656, 0.26280960083007815, 0.26294784545898436, 0.2627061767578125, 0.26282803344726563, 0.26302462768554685, 0.2627491760253906, 0.2626959228515625, 0.2627440490722656, 0.26291915893554685, 0.26292120361328125, 0.26270721435546873, 0.26285260009765626, 0.26279525756835936, 0.26292327880859373, 0.2627440490722656, 0.26276043701171875, 0.26281268310546874, 0.2629427185058594, 0.26270925903320314, 0.2629847106933594, 0.26284442138671876, 0.2630963134765625, 0.26291815185546874, 0.2629662780761719, 0.26276556396484374, 0.2630348815917969, 0.2629161071777344, 0.26322738647460936, 0.26300827026367185, 0.26284033203125, 0.26286798095703123, 0.2630676574707031, 0.2629591064453125, 0.26298062133789063, 0.2627184753417969, 0.2629345397949219, 0.2627491760253906, 0.26317108154296875, 0.2627276916503906, 0.26300619506835937, 0.26283621215820313, 0.26308709716796874, 0.26335028076171874, 0.26290585327148436, 0.262793212890625, 0.2628751220703125, 0.2630625305175781, 0.262866943359375, 0.26292120361328125, 0.5473106079101563, 0.262751220703125, 0.26283111572265627, 0.2626652221679687, 0.2627184753417969, 0.26272357177734373, 0.26292327880859373, 0.26301031494140625, 0.26290585327148436, 0.26286181640625, 0.26279220581054685, 0.2627553405761719, 0.2628884582519531, 0.26308914184570314, 0.26276556396484374, 0.2630502319335937, 0.26282803344726563, 
0.26288641357421877, 0.2627993469238281, 0.26274200439453127, 0.26290277099609377, 0.2628853759765625, 0.2627716979980469, 0.2627716979980469, 0.2627010498046875, 0.26282803344726563, 0.2629017639160156, 0.26265087890625, 0.262935546875, 0.2629508972167969, 0.2630492248535156, 0.26289971923828126, 0.2630379638671875, 0.2628894653320312, 0.2629601135253906, 0.2628935546875, 0.2628833312988281, 0.26278707885742186, 0.26283724975585937, 0.2631280517578125, 0.2629242858886719, 0.26284237670898436, 0.2629294128417969, 0.262930419921875, 0.2628167724609375, 0.2629847106933594, 0.26290585327148436, 0.26296832275390625, 0.2628269958496094, 0.2630911865234375, 0.2628587646484375, 0.26311578369140626, 0.2628044738769531, 0.2630000610351563, 0.26291915893554685, 0.26306866455078126, 0.26301644897460935, 0.2631628723144531, 0.2630225830078125, 0.26312396240234376, 0.26298367309570314, 0.26306561279296875, 0.2629017639160156, 0.547757080078125, 0.2626713562011719, 0.26260171508789065, 0.262729736328125, 0.26323355102539064, 0.2629171142578125, 0.2627225646972656, 0.2627696533203125, 0.262856689453125, 0.2626641845703125, 0.26276760864257814, 0.2626375732421875, 0.2626744384765625, 0.2626744384765625, 0.26258636474609376, 0.262724609375, 0.2627061767578125, 0.26262835693359377, 0.26265087890625, 0.26280856323242185, 0.2628044738769531, 0.26286181640625, 0.2627768249511719, 0.26277069091796873, 0.2627133483886719, 0.26289971923828126, 0.26298880004882813, 0.2631403503417969, 0.2628608093261719, 0.26283416748046873, 0.26289254760742187, 0.26295297241210935, 0.2626713562011719, 0.2626641845703125, 0.2626150512695313, 0.26276455688476563, 0.26270413208007815, 0.2628331604003906, 0.262793212890625, 0.26271539306640623, 0.2627286987304687, 0.2627010498046875, 0.26297854614257815, 0.26323251342773435, 0.26312295532226565, 0.2629427185058594, 0.2632806396484375, 0.2629427185058594, 0.26271435546875, 0.26275326538085936, 0.2626631774902344, 0.262866943359375, 0.2626744384765625, 0.26271539306640623, 0.26262631225585936, 0.26273895263671876, 0.26274713134765626, 0.2630215759277344, 0.26290380859375, 0.2628802490234375, 0.26276760864257814, 0.26292837524414064, 0.26273995971679687, 0.5472122802734375, 0.262940673828125, 0.2628536376953125, 0.2628188171386719, 0.26270513916015625, 0.26295501708984376, 0.2627082214355469, 0.26284954833984375, 0.26267852783203127, 0.2627666015625, 0.26269491577148435, 0.2628239440917969, 0.26267034912109377, 0.26264883422851565, 0.2627061767578125, 0.2629591064453125, 0.2631485290527344, 0.26324786376953124, 0.26280755615234375, 0.26280960083007815, 0.26319256591796875, 0.26267852783203127, 0.2628167724609375, 0.2628433837890625, 0.26269287109375, 0.26274713134765626, 0.2629744567871094, 0.26298367309570314, 0.26273178100585937, 0.26305227661132813, 0.26277886962890623, 0.26314138793945313, 0.26285260009765626, 0.26283621215820313, 0.2628055114746094, 0.26295297241210935, 0.2627491760253906, 0.2626693115234375, 0.26276043701171875, 0.26314752197265623, 0.26287820434570314, 0.26291302490234375, 0.26278912353515627, 0.2630154113769531, 0.2626723937988281, 0.2631383056640625, 0.2627901306152344, 0.26301644897460935, 0.2626805725097656, 0.2629847106933594, 0.262761474609375, 0.26287103271484374, 0.26281268310546874, 0.26283724975585937, 0.2633441162109375, 0.2631127014160156, 0.2628884582519531, 0.26285260009765626, 0.26326937866210937, 0.26319155883789064, 0.2628894653320312, 0.26291815185546874, 0.2629099426269531]",tokens/s,3.7463655632377972,,,,, 
4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,t,t,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/t/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a2fbe-4784ecb05440970f6d6c1b76;c6c1ce15-dceb-4b79-962b-bccfaabe5ab9) Repository Not Found for url: https://huggingface.co/t/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: t is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neox-20b,,cuda,0,42,,,,,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,main,False,False,MB,1888.280576,15194.390528,0.0,14547.943424,13898.252288,s,10,16.972713500976564,1.6972713500976564,0.0011802396473078822,1.6969771728515626,1.6988580200195311,1.699245782470703,1.6995559924316406,"[1.6969825439453126, 1.6969718017578126, 1.6960919189453125, 1.6958106689453125, 1.6964619140625, 1.69635986328125, 1.6974122314453124, 1.6982171630859375, 1.699633544921875, 
1.6987718505859375]",tokens/s,150.83033127570937,kWh,2.0026979611979592e-05,1.097496669508473e-05,9.603446571639917e-05,0.0001270364120234635,tokens/kWh,2015170.2643547354,MB,1888.280576,15194.390528,0.0,14547.943424,14315.97312,s,10,986.8612499999999,98.68612499999999,0.014850841233799409,98.68501953125,98.70599609374999,98.707103515625,98.707989453125,"[98.6797109375, 98.687671875, 98.6747890625, 98.6555546875, 98.6919140625, 98.6783203125, 98.6969609375, 98.7082109375, 98.70575, 98.6823671875]",tokens/s,0.6383876152802637,kWh,0.0011649224273860455,0.0006384806384544572,0.00558826860950019,0.007391671675340693,tokens/kWh,8523.105836826315,,s,629,1000.5712542724599,1.5907333136287136,0.2015788363996436,1.5663739013671876,1.5674868164062499,1.5676927978515625,3.262457841796875,"[1.5671644287109374, 1.5661424560546875, 1.56664013671875, 1.5672913818359375, 1.566525390625, 1.5666636962890625, 1.56687158203125, 1.566798828125, 1.565517822265625, 1.56556494140625, 1.56592333984375, 1.56533251953125, 1.5655465087890625, 1.5658004150390625, 1.565998046875, 1.5670538330078125, 1.5664508056640625, 1.56584033203125, 1.5667855224609375, 1.5674542236328124, 1.567055908203125, 1.5672022705078126, 1.5673907470703126, 1.56657666015625, 1.5663001708984374, 1.566665771484375, 1.5659376220703125, 1.5659520263671876, 1.5661055908203125, 1.566899169921875, 1.5660892333984375, 1.566604248046875, 1.5656611328125, 1.5667138671875, 1.565654052734375, 1.566857177734375, 1.5661802978515624, 1.5664854736328124, 1.5667210693359375, 1.5670538330078125, 1.56666162109375, 1.566857177734375, 1.5659581298828125, 1.565955078125, 1.5662294921875, 1.5661065673828125, 1.566191650390625, 1.5659642333984376, 1.5664527587890624, 1.5659898681640625, 1.5656510009765625, 1.5664803466796875, 1.566614501953125, 1.566317626953125, 1.5665059814453124, 1.565919189453125, 1.5657093505859374, 1.5657308349609376, 1.5665244140625, 1.565981689453125, 1.5660799560546874, 1.56632373046875, 3.263318115234375, 1.5650416259765625, 1.5667425537109374, 1.5671868896484376, 1.5674766845703125, 1.566899169921875, 1.5670477294921874, 1.567247314453125, 1.5669462890625, 1.566482421875, 1.56721044921875, 1.567352783203125, 1.56718603515625, 1.5668541259765625, 1.565632568359375, 1.5655035400390624, 1.565454345703125, 1.566171142578125, 1.5655423583984376, 1.5656632080078126, 1.565739013671875, 1.5655546875, 1.56594384765625, 1.565739990234375, 1.5656090087890624, 1.5654093017578126, 1.5656141357421876, 1.5657379150390625, 1.565550537109375, 1.5656468505859376, 1.566182373046875, 1.56573388671875, 1.5660994873046874, 1.566614501953125, 1.5671705322265626, 1.5668858642578125, 1.5673907470703126, 1.5655372314453124, 1.5656827392578125, 1.567224853515625, 1.5674173583984374, 1.567141845703125, 1.567562744140625, 1.56704150390625, 1.566593994140625, 1.5660902099609375, 1.56824267578125, 1.56689404296875, 1.5673763427734375, 1.5663564453125, 1.566992431640625, 1.567836181640625, 1.5679334716796876, 1.5667803955078126, 1.5659478759765626, 1.566908447265625, 1.566277587890625, 1.5658690185546875, 1.5664803466796875, 1.5664271240234375, 1.5662049560546876, 1.5662171630859374, 1.5660595703125, 3.26247021484375, 1.5656990966796875, 1.5657799072265626, 1.566017578125, 1.5667598876953126, 1.565744140625, 1.566393310546875, 1.56580859375, 1.56586083984375, 1.5654072265625, 1.56617529296875, 1.5659202880859375, 1.5659725341796875, 1.5665029296875, 1.5664486083984375, 1.5655260009765626, 1.5660780029296875, 1.5663062744140626, 1.5667117919921876, 1.566983154296875, 
1.5657728271484375, 1.565895751953125, 1.566373779296875, 1.5677890625, 1.566457763671875, 1.5663277587890625, 1.566962646484375, 1.566697509765625, 1.5665489501953125, 1.566899169921875, 1.56623046875, 1.566688232421875, 1.5666124267578125, 1.5657093505859374, 1.566341064453125, 1.5660462646484374, 1.5663482666015625, 1.566123046875, 1.5662838134765624, 1.5664619140625, 1.56621826171875, 1.566017578125, 1.5659622802734374, 1.565760498046875, 1.5661669921875, 1.566396484375, 1.5663912353515625, 1.5682620849609374, 1.5664476318359375, 1.5661475830078124, 1.56608203125, 1.56626123046875, 1.5659765625, 1.5659632568359374, 1.565811767578125, 1.56657763671875, 1.5658956298828124, 1.5657799072265626, 1.5660042724609375, 1.566275634765625, 1.5663533935546874, 1.56682958984375, 1.566255126953125, 3.26236865234375, 1.5657420654296874, 1.5653990478515625, 1.5662427978515625, 1.565697021484375, 1.5651962890625, 1.56592333984375, 1.5652525634765626, 1.5654676513671875, 1.5651666259765624, 1.56598583984375, 1.5653150634765625, 1.5663011474609374, 1.5655894775390624, 1.5655516357421875, 1.565338623046875, 1.5655577392578126, 1.5656806640625, 1.56554345703125, 1.56598779296875, 1.5655843505859375, 1.5654718017578124, 1.5656263427734376, 1.5660400390625, 1.5658824462890626, 1.5657850341796875, 1.5657625732421876, 1.5657196044921875, 1.56554443359375, 1.566350341796875, 1.5656375732421874, 1.5655997314453125, 1.566192626953125, 1.565496337890625, 1.5660390625, 1.566271484375, 1.5658936767578124, 1.565685791015625, 1.5660216064453125, 1.5656141357421876, 1.565828125, 1.566434326171875, 1.5664906005859376, 1.5654307861328125, 1.56860107421875, 1.56617529296875, 1.566096435546875, 1.5658311767578126, 1.5665806884765625, 1.5662591552734375, 1.5666104736328126, 1.56617724609375, 1.566076904296875, 1.5665244140625, 1.566587890625, 1.56634521484375, 1.5658741455078125, 1.5661434326171875, 1.566371826171875, 1.5663533935546874, 1.566328857421875, 1.5666011962890625, 1.5661240234375, 3.263153076171875, 1.56556494140625, 1.565706298828125, 1.5666165771484375, 1.566034912109375, 1.566002197265625, 1.5660831298828124, 1.565319091796875, 1.5665797119140625, 1.5655751953125, 1.5664332275390624, 1.5658896484375, 1.5659744873046875, 1.5658916015625, 1.566130126953125, 1.5668695068359375, 1.5666226806640624, 1.5660933837890625, 1.566509033203125, 1.5661363525390626, 1.5657738037109374, 1.5658199462890625, 1.5663206787109376, 1.5657840576171875, 1.566213134765625, 1.5667742919921874, 1.5661240234375, 1.5659315185546876, 1.565498291015625, 1.5660472412109374, 1.5656407470703124, 1.56573486328125, 1.566254150390625, 1.56568359375, 1.5663575439453126, 1.566123046875, 1.5667579345703124, 1.5663963623046875, 1.5667947998046876, 1.5661865234375, 1.5662919921875, 1.5661158447265624, 1.5670897216796875, 1.56710302734375, 1.567573974609375, 1.5670128173828124, 1.5670006103515626, 1.5675535888671874, 1.568489501953125, 1.5677030029296875, 1.5673804931640625, 1.5675074462890626, 1.5675924072265626, 1.56819970703125, 1.56786181640625, 1.5679241943359374, 1.5680113525390624, 1.5678955078125, 1.56760888671875, 1.5664681396484375, 1.5662049560546876, 1.5662213134765626, 1.5661905517578125, 3.262426025390625, 1.5655628662109375, 1.56493212890625, 1.5661669921875, 1.5657431640625, 1.5655537109375, 1.5660482177734374, 1.5657359619140625, 1.5666063232421874, 1.5661004638671876, 1.5656785888671876, 1.5670230712890625, 1.5668284912109376, 1.5666944580078126, 1.566755859375, 1.566581787109375, 1.5667763671875, 1.5658946533203124, 
1.5655372314453124, 1.5659530029296875, 1.565708251953125, 1.5669320068359376, 1.56744091796875, 1.566341064453125, 1.5660360107421876, 1.5659837646484376, 1.5658629150390626, 1.5660892333984375, 1.565887451171875, 1.56593359375, 1.5656898193359374, 1.5661455078125, 1.566086181640625, 1.5658076171875, 1.5659263916015624, 1.5658218994140625, 1.56609033203125, 1.56613525390625, 1.5673641357421875, 1.5665848388671875, 1.5666175537109375, 1.5664681396484375, 1.5664619140625, 1.566224365234375, 1.5664588623046876, 1.5661363525390626, 1.5667579345703124, 1.5667978515625, 1.5667178955078125, 1.5658916015625, 1.5658680419921875, 1.5657728271484375, 1.5670528564453126, 1.567161376953125, 1.5674490966796875, 1.567394775390625, 1.567477783203125, 1.56653369140625, 1.5658792724609376, 1.56665966796875, 1.5675699462890624, 1.56628076171875, 1.5665899658203124, 3.26359765625, 1.5656744384765624, 1.565328369140625, 1.566392333984375, 1.565917236328125, 1.566066650390625, 1.567247314453125, 1.56674560546875, 1.567057861328125, 1.566623779296875, 1.5667916259765624, 1.5669647216796876, 1.56680810546875, 1.5664117431640625, 1.566813232421875, 1.5673231201171876, 1.566376953125, 1.5661844482421876, 1.567056884765625, 1.567025146484375, 1.566773193359375, 1.5668214111328125, 1.5673548583984376, 1.568668701171875, 1.567552490234375, 1.566614501953125, 1.566899169921875, 1.567447021484375, 1.5675135498046875, 1.5669381103515625, 1.5667794189453126, 1.567139892578125, 1.5667916259765624, 1.566656494140625, 1.566287841796875, 1.5665806884765625, 1.5664046630859374, 1.5664854736328124, 1.56649169921875, 1.56577587890625, 1.5662652587890624, 1.5666688232421875, 1.5665521240234375, 1.56607080078125, 1.5668214111328125, 1.5663809814453125, 1.5665684814453125, 1.5666226806640624, 1.566630859375, 1.5663360595703124, 1.566734375, 1.5659100341796874, 1.566339111328125, 1.5664271240234375, 1.5667056884765624, 1.5661854248046876, 1.5660308837890624, 1.56668310546875, 1.5665264892578126, 1.5664742431640626, 1.5667752685546874, 1.5658568115234375, 1.566477294921875, 3.26474853515625, 1.5660062255859375, 1.5659560546875, 1.5659263916015624, 1.5657471923828126, 1.5661905517578125, 1.5662623291015625, 1.5657779541015624, 1.5659345703125, 1.5662991943359375, 1.5659427490234374, 1.565487060546875, 1.566613525390625, 1.56559765625, 1.56596533203125, 1.566318603515625, 1.566983154296875, 1.5657728271484375, 1.5661322021484374, 1.566572509765625, 1.5666114501953126, 1.5659100341796874, 1.56704150390625, 1.56611376953125, 1.5677716064453124, 1.5673876953125, 1.5676068115234374, 1.5671654052734374, 1.5670262451171875, 1.5657738037109374, 1.5667547607421874, 1.5669217529296875, 1.5676558837890624, 1.56761083984375, 1.567972412109375, 1.567614990234375, 1.5675115966796875, 1.5676702880859374, 1.56752587890625, 1.567363037109375, 1.567826904296875, 1.567635498046875, 1.5676488037109375, 1.5662724609375, 1.5680296630859376, 1.567309814453125, 1.567677490234375, 1.567458251953125, 1.5668489990234375, 1.565865966796875, 1.566993408203125, 1.5673487548828124, 1.567581298828125, 1.566005126953125, 1.5679825439453126, 1.567009765625, 1.566161865234375, 1.5676138916015625, 1.56758935546875, 1.5667568359375, 1.5663258056640625, 1.566224365234375, 1.5670302734375, 3.26736279296875, 1.5667579345703124, 1.5664261474609376, 1.5665531005859374, 1.5668193359375, 1.566329833984375, 1.5663739013671876, 1.566214111328125, 1.5671173095703126, 1.565612060546875, 1.5653099365234375, 1.565854736328125, 1.5658486328125, 1.5660902099609375, 1.566256103515625, 
1.5667547607421874, 1.5674736328125, 1.56900244140625, 1.5678065185546874, 1.567677490234375, 1.5673057861328126, 1.5676702880859374, 1.5678648681640626, 1.566625732421875, 1.566234619140625, 1.5665244140625, 1.565811767578125, 1.566467041015625, 1.5663380126953126, 1.5663995361328125, 1.56670458984375, 1.566562255859375, 1.56580859375, 1.5658076171875, 1.567499267578125, 1.566665771484375, 1.5675023193359374, 1.5676190185546874, 1.5666585693359374, 1.5654676513671875, 1.5674849853515624, 1.5671900634765625, 1.567220703125, 1.5676558837890624, 1.5666514892578125, 1.5663369140625, 1.5666441650390626, 1.566841796875, 1.566866455078125, 1.56685107421875, 1.567130615234375, 1.5664896240234376, 1.5665152587890625, 1.5667691650390625, 1.56735693359375, 1.5668797607421876, 1.5669544677734375, 1.565843505859375, 1.5657564697265625, 1.5662509765625, 1.5679661865234376, 1.567494140625, 1.5670753173828125, 3.26488671875, 1.56588232421875, 1.5666165771484375, 1.56659912109375, 1.5660400390625, 1.565919189453125, 1.5653775634765625, 1.5651685791015626, 1.5665255126953126, 1.5655526123046875, 1.56554443359375, 1.565875244140625, 1.565812744140625, 1.56569091796875, 1.56594384765625, 1.566340087890625, 1.565750244140625, 1.5661793212890625, 1.5670374755859375, 1.5664691162109374, 1.5666708984375, 1.5669124755859376, 1.5670743408203125, 1.565885498046875, 1.567392822265625, 1.56645068359375, 1.567561767578125, 1.5667598876953126, 1.566843994140625, 1.566246826171875, 1.56635546875, 1.5664219970703126, 1.565961181640625, 1.5663524169921874, 1.5675914306640626, 1.56609326171875, 1.5661405029296875, 1.5667711181640624, 1.566587890625, 1.5662694091796876, 1.566982177734375, 1.566482421875, 1.566496826171875, 1.5665531005859374, 1.5663370361328126, 1.5661158447265624, 1.566561279296875, 1.566202880859375, 1.566286865234375, 1.5659674072265626, 1.5661629638671875, 1.5660155029296876, 1.566634033203125, 1.5668284912109376, 1.5664476318359375, 1.5665438232421875, 1.5667864990234375, 1.5668736572265625, 1.56661865234375, 1.5671204833984376, 1.566720947265625, 1.5667332763671875, 1.566159912109375]",tokens/s,0.6286408862079101,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,/,/,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File 
""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 370, in cached_file raise EnvironmentError( OSError: / does not appear to have a file named config.json. Checkout 'https://huggingface.co///tree/None' for available files. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-2b,google/recurrentgemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in 
get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc383-096444295e3a9c040e944df2;5e7b9d84-5c52-420d-aa13-d15ef24f9b8e) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/recurrentgemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-72B,Qwen/Qwen1.5-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,-,-,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 106, in _inner_fn validate_repo_id(arg_value) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 160, in validate_repo_id raise HFValidationError( huggingface_hub.errors.HFValidationError: Repo id must use alphanumeric chars or '-', '_', '.', '--' and '..' are forbidden, '-' and '.' cannot start or end the name, max length is 96: '-'. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 463, in cached_file raise EnvironmentError( OSError: Incorrect path_or_model_id: '-'. Please provide either the path to a local folder or the repo_id of a model on the Hub. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-6B-nl,Salesforce/codegen-6B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1400.639488,6094.848,0.0,5448.400896,5215.942144,s,10,5.915999877929687,0.5915999877929687,0.0028828720813889874,0.5905971069335938,0.5921576904296875,0.5961826965332031,0.5994027014160156,"[0.6002077026367187, 0.5907606201171876, 0.5909608764648437, 0.5905933227539063, 0.5902119140625, 0.5906008911132813, 0.5904852294921875, 0.5912632446289062, 0.5904329223632813, 0.590483154296875]",tokens/s,432.7248229923689,kWh,6.983380516370138e-06,3.826209227811299e-06,3.1648620090131334e-05,4.2458209834312766e-05,tokens/kWh,6029458.166017933,MB,1400.967168,6094.848,0.0,5448.400896,5405.644288,s,10,341.33091015625,34.133091015625,0.0089451948085196,34.130179687500004,34.14430546875,34.149687890625,34.153993828125,"[34.13034765625, 34.1351484375, 34.13001171875, 34.128140625, 34.12757421875, 34.12398828125, 34.13193359375, 34.1255859375, 34.143109375, 34.1550703125]",tokens/s,1.8457162280193342,kWh,0.00040300285582741096,0.00022088092240917615,0.0018324814267682385,0.002456365205004826,tokens/kWh,25647.65201511484,,s,629,346.1052627563478,0.5502468406301234,0.07016038395822202,0.5417257080078125,0.5423982788085937,0.54271630859375,1.1323166894531251,"[0.5417594604492187, 0.5414471435546875, 0.5411215209960938, 0.5414297485351562, 0.5414563598632812, 0.541576171875, 0.5412239379882813, 0.5414502563476562, 0.5416427612304687, 0.54096484375, 0.54148095703125, 0.5411840209960938, 0.5414339599609375, 0.5409760131835938, 0.5409024047851563, 0.5412208862304687, 0.5413017578125, 0.5412515869140625, 0.5410549926757813, 0.5412177734375, 0.5411133422851563, 0.54131201171875, 0.5422049560546875, 0.5416530151367187, 0.54127001953125, 0.5410795288085938, 0.5415239868164062, 0.5422069702148438, 0.5421731567382813, 0.5426155395507812, 0.542755859375, 0.541576171875, 0.5415362548828125, 0.5411993408203125, 0.5416642456054688, 0.541591552734375, 0.541427734375, 0.5414830322265625, 0.5413529663085938, 0.5414666137695312, 0.5418700561523437, 0.5421414184570312, 0.5417953491210937, 0.5417728271484376, 0.5417267456054687, 0.5425919799804687, 0.5420779418945313, 0.5423953857421875, 0.542466064453125, 0.5427732543945313, 0.5427783813476562, 0.5423319091796875, 0.5419212646484375, 0.5422335815429687, 0.5419622192382813, 0.542166015625, 0.542202880859375, 0.5419612426757813, 0.541928466796875, 0.5428971557617187, 0.5427886352539063, 0.542540771484375, 1.132921875, 0.5414379272460937, 0.541675537109375, 0.5412976684570312, 0.5413980102539062, 0.541432861328125, 0.5411819458007813, 0.5411235961914063, 0.5415782470703125, 0.5412136840820313, 0.5412894897460937, 0.5412925415039063, 0.5418301391601562, 0.541549560546875, 0.5421035766601563, 0.5417698974609375, 0.5415985717773437, 0.5415782470703125, 0.5415618286132813, 0.5418792724609375, 0.5416151123046875, 0.5421527099609375, 0.5418721313476562, 
0.5414307861328125, 0.5414574584960937, 0.5414255981445313, 0.5418076171875, 0.5416806640625, 0.5416058959960938, 0.5413232421875, 0.5415167846679687, 0.5413140869140625, 0.5417481689453125, 0.5427220458984375, 0.5421270751953124, 0.541823974609375, 0.54205029296875, 0.5420144653320312, 0.5415424194335937, 0.541971435546875, 0.5416202392578126, 0.541264892578125, 0.5415219116210938, 0.541216796875, 0.5416058959960938, 0.541259765625, 0.5414993896484375, 0.5412925415039063, 0.5416744995117188, 0.5412996826171875, 0.5416222534179688, 0.5422120971679687, 0.5429841918945313, 0.542635009765625, 0.5428388061523437, 0.542835693359375, 0.5432268676757812, 0.5427363891601562, 0.5429586181640625, 0.5425602416992188, 0.5428838500976563, 0.5426534423828125, 0.5426380615234375, 1.1323843994140625, 0.5415637817382812, 0.5415362548828125, 0.541212646484375, 0.5413027954101562, 0.54141748046875, 0.54186083984375, 0.542003173828125, 0.5418147583007813, 0.5417257080078125, 0.5426544799804688, 0.5414871215820313, 0.5419520263671875, 0.5414297485351562, 0.5414932250976563, 0.5413765258789063, 0.5414635620117187, 0.5415690307617187, 0.5419008178710938, 0.5413099365234375, 0.5413130493164062, 0.5413457641601562, 0.5415126953125, 0.541191162109375, 0.541591552734375, 0.5423012084960938, 0.5414676513671876, 0.5414563598632812, 0.5417738037109375, 0.541896728515625, 0.5416048583984375, 0.5420687255859375, 0.5419673461914063, 0.5420953369140625, 0.541780029296875, 0.541761474609375, 0.5418792724609375, 0.5419478759765625, 0.54186083984375, 0.5423062744140625, 0.54225, 0.5419407348632812, 0.542171142578125, 0.5424015502929688, 0.5422223510742188, 0.5414348754882813, 0.5414635620117187, 0.54171337890625, 0.5415720825195313, 0.5418065795898438, 0.5415321655273437, 0.54148095703125, 0.5417645874023438, 0.54140625, 0.5415218505859375, 0.5416734619140625, 0.5417164916992188, 0.5417820434570313, 0.5417778930664062, 0.54166015625, 0.5416734619140625, 0.5428797607421875, 0.542571533203125, 1.1325224609375, 0.5419622192382813, 0.541770751953125, 0.5419089965820313, 0.5416908569335938, 0.5413734130859374, 0.5414747924804687, 0.5413980102539062, 0.5412987060546876, 0.5413621826171875, 0.5414256591796875, 0.5414717407226562, 0.5418414306640625, 0.5414390258789062, 0.5416283569335938, 0.5422162475585938, 0.5423297729492188, 0.5422459106445312, 0.5416038208007813, 0.5418384399414062, 0.5420675659179688, 0.5416325073242187, 0.5418936157226563, 0.54164892578125, 0.5416837158203125, 0.5414542846679687, 0.5416089477539062, 0.541623291015625, 0.541696044921875, 0.5414962768554688, 0.5420676879882812, 0.5414850463867188, 0.5416734619140625, 0.5414481811523437, 0.5424260864257813, 0.5415341796875, 0.5418352661132813, 0.5416427612304687, 0.5419017944335938, 0.5418506469726563, 0.5419898681640625, 0.541454345703125, 0.5417994384765625, 0.5414666137695312, 0.541697021484375, 0.5416581420898438, 0.5419612426757813, 0.5414061889648437, 0.5416857299804687, 0.5413396606445312, 0.5420892333984375, 0.5414430541992188, 0.5420545654296876, 0.5417859497070312, 0.5416704711914062, 0.5413395385742188, 0.5417564086914063, 0.5420462036132813, 0.5418311767578124, 0.5419110107421875, 0.5419161376953125, 0.5414850463867188, 0.541528076171875, 1.131968505859375, 0.5414400024414062, 0.5414932250976563, 0.5411287231445312, 0.541296630859375, 0.5412792358398437, 0.5417031860351562, 0.5415956420898438, 0.5414348754882813, 0.5413161010742188, 0.5413847045898438, 0.541760498046875, 0.5419417724609376, 0.5416417236328125, 0.5418035278320312, 0.54144921875, 
0.54145947265625, 0.5413785400390625, 0.541380615234375, 0.5414000854492188, 0.5414727783203125, 0.5413887939453125, 0.5414727783203125, 0.5413324584960938, 0.5414451293945313, 0.54200830078125, 0.541663330078125, 0.5414224853515625, 0.5416151123046875, 0.5414993896484375, 0.54185986328125, 0.5417636108398437, 0.5419540405273438, 0.5416038208007813, 0.5418005981445313, 0.541447021484375, 0.541686767578125, 0.5417062377929688, 0.5416693725585937, 0.5417902221679688, 0.5416990966796875, 0.5416304931640625, 0.5417277221679687, 0.5417277221679687, 0.5418322143554688, 0.5419100341796875, 0.5417410888671875, 0.5418536987304687, 0.5424578247070313, 0.5422274780273437, 0.5421045532226563, 0.5419489135742187, 0.5420206298828125, 0.5418895263671875, 0.541897705078125, 0.5418690795898438, 0.5419857788085938, 0.5421117553710938, 0.54192333984375, 0.5418588256835938, 0.5421434936523437, 0.5418905639648437, 0.5424384155273437, 1.1324405517578124, 0.5413406982421874, 0.5414522705078125, 0.5422376708984376, 0.5428193359375, 0.541686767578125, 0.5414686889648438, 0.5414215698242187, 0.541285400390625, 0.5416571044921875, 0.5415997314453125, 0.5419386596679687, 0.5416673583984375, 0.541638671875, 0.5414850463867188, 0.5416683349609375, 0.5423175659179688, 0.5418322143554688, 0.5414451904296875, 0.5412515258789062, 0.5415239868164062, 0.542023681640625, 0.541918212890625, 0.5417359619140625, 0.541549560546875, 0.5413662719726563, 0.5415792846679688, 0.5414010620117188, 0.5414441528320313, 0.5412525634765625, 0.54166015625, 0.5412556762695313, 0.541591552734375, 0.5411932373046875, 0.541432861328125, 0.5416161499023437, 0.5414912109375, 0.5413294067382812, 0.541365234375, 0.5413304443359375, 0.5423472900390625, 0.541759521484375, 0.5416038208007813, 0.5411983642578125, 0.5415557250976563, 0.5414194946289063, 0.5415966796875, 0.54135498046875, 0.5417625732421875, 0.5416345825195312, 0.5418465576171875, 0.541486083984375, 0.5419673461914063, 0.541765625, 0.542076904296875, 0.5416171264648437, 0.5418035278320312, 0.5415731201171875, 0.541892578125, 0.54164892578125, 0.5422459106445312, 0.5417850952148437, 0.5420390625, 1.132142578125, 0.5416161499023437, 0.541454345703125, 0.5413867797851563, 0.5415997314453125, 0.541613037109375, 0.5415372924804688, 0.5415844116210937, 0.5418772583007813, 0.5416714477539063, 0.5416253662109375, 0.5415557250976563, 0.5419458618164062, 0.5417195434570312, 0.5419008178710938, 0.541591552734375, 0.5416642456054688, 0.5415946044921875, 0.5415925903320312, 0.5416468505859375, 0.541644775390625, 0.5415782470703125, 0.5415966796875, 0.541538330078125, 0.541675537109375, 0.5414788818359375, 0.5420431518554687, 0.5420267333984375, 0.5419776000976563, 0.5416171264648437, 0.541822998046875, 0.54211376953125, 0.5417615356445312, 0.5421096801757812, 0.5417891845703126, 0.5419161376953125, 0.541697021484375, 0.5416591186523437, 0.5419724731445312, 0.5416560668945313, 0.54164892578125, 0.5418291015625, 0.5418055419921874, 0.541601806640625, 0.5418721313476562, 0.54198681640625, 0.5417297973632812, 0.5416796264648438, 0.5417349243164062, 0.5415823364257812, 0.5425029296875, 0.542382080078125, 0.5421414184570312, 0.5416663208007813, 0.5417011108398437, 0.54160302734375, 0.5418053588867188, 0.5416949462890625, 0.5418045654296875, 0.5421281127929688, 0.5423175659179688, 0.5419632568359375, 0.542244873046875, 1.1331278076171876, 0.5414716186523437, 0.54160791015625, 0.54152294921875, 0.5418578491210938, 0.5414696655273438, 0.5420236206054687, 0.5413458862304688, 0.5417337646484375, 
0.5414185180664063, 0.5415403442382812, 0.5418291015625, 0.5415823974609375, 0.5414583740234375, 0.541802490234375, 0.541475830078125, 0.5415629272460938, 0.5419693603515625, 0.5415751953125, 0.5413673095703125, 0.5415239868164062, 0.5413990478515625, 0.5414297485351562, 0.5419171752929688, 0.542044189453125, 0.5415536499023438, 0.5416273803710937, 0.5416406860351562, 0.5414400024414062, 0.541470703125, 0.5419960327148438, 0.5414901733398437, 0.5415792846679688, 0.5415465087890625, 0.5416714477539063, 0.5413161010742188, 0.5425889282226563, 0.5413949584960938, 0.5415833740234375, 0.541106201171875, 0.5417615356445312, 0.5417666625976563, 0.541760498046875, 0.5414307861328125, 0.5415894775390625, 0.5413898315429687, 0.5415833740234375, 0.5412874145507812, 0.5417349243164062, 0.5415741577148437, 0.5416611938476562, 0.5413416748046875, 0.5422161865234375, 0.5416673583984375, 0.5423308715820313, 0.5420431518554687, 0.5420534057617188, 0.5417984008789063, 0.5419888916015625, 0.5413898315429687, 0.5425121459960938, 0.541970458984375, 0.5419970092773437, 1.13246826171875, 0.5414359130859375, 0.5415659790039062, 0.54150244140625, 0.5414912719726562, 0.5414020385742188, 0.541581298828125, 0.5415567626953125, 0.5416243286132812, 0.5418076171875, 0.5416089477539062, 0.5414696655273438, 0.5415587768554687, 0.541931640625, 0.542152587890625, 0.541929443359375, 0.541939697265625, 0.54158642578125, 0.5415321655273437, 0.5415823364257812, 0.5417267456054687, 0.5416222534179688, 0.54167041015625, 0.54148095703125, 0.5416376342773438, 0.5415116577148438, 0.5416591186523437, 0.5424230346679687, 0.5425469360351562, 0.5417778930664062, 0.5417523193359375, 0.5418987426757812, 0.5419951171875, 0.5423696899414062, 0.5424568481445312, 0.5426206665039063, 0.542086181640625, 0.541823974609375, 0.54186083984375, 0.5420349731445312, 0.5424475708007812, 0.5419776000976563, 0.5422203369140625, 0.5420625, 0.542244873046875, 0.5426073608398437, 0.5429688110351563, 0.5421936645507812, 0.542075927734375, 0.5420676879882812, 0.5423175659179688, 0.5427466430664063, 0.5428531494140625, 0.542118896484375, 0.5421475830078125, 0.542002197265625, 0.5421066284179688, 0.5419857788085938, 0.5419765625, 0.5419990844726562, 0.5420123901367188, 0.5418936157226563, 0.5418700561523437, 1.1330999755859374, 0.5421752319335937, 0.5420472412109375, 0.5415352172851563, 0.541517822265625, 0.5416509399414062, 0.54187109375, 0.5422120971679687, 0.5417625732421875, 0.5419806518554687, 0.5415413818359375, 0.54137548828125, 0.5424609375, 0.5421240234375, 0.54167041015625, 0.5416581420898438, 0.5418803100585937, 0.5415997314453125, 0.5424701538085938, 0.5428551635742187, 0.5424916381835938, 0.542624755859375, 0.542044189453125, 0.54175537109375, 0.541865966796875, 0.54215576171875, 0.5419192504882813, 0.5422418212890625, 0.54240869140625, 0.5427630004882813, 0.543088623046875, 0.5421854858398437, 0.5420318603515625, 0.5419612426757813, 0.5423749389648438, 0.541897705078125, 0.5418147583007813, 0.5428131713867187, 0.542508056640625, 0.5420390625, 0.5422202758789062, 0.542045166015625, 0.542202880859375, 0.5423974609375, 0.5422069702148438, 0.541749267578125, 0.5420185546875, 0.5419468994140625, 0.5423267822265625, 0.5417062377929688, 0.5420277709960938, 0.5420349731445312, 0.5422673950195313, 0.5421270751953124, 0.5421915893554687, 0.5427077026367187, 0.5426472778320313, 0.542382080078125, 0.54236572265625, 0.542023681640625, 0.5423565063476562, 0.542160888671875, 0.5429483642578125]",tokens/s,1.8173661821571478,,,,, 
4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-7b,google/recurrentgemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667cc3d9-067622f76880236602717970;60d11986-191d-4894-96b9-892841d6963d) Repository Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: google/recurrentgemma-7b is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-13b,huggyllama/llama-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,3015.59808,9299.296256,0.0,8652.849152,8210.185216,s,10,11.00354541015625,1.100354541015625,0.0021995629105345756,1.10037060546875,1.1026337646484374,1.103404345703125,1.104020810546875,"[1.1041749267578125, 1.1024625244140625, 1.0971700439453125, 1.0986126708984374, 1.0993076171875, 1.097432861328125, 1.099380126953125, 1.101361083984375, 1.1021866455078124, 
1.1014569091796875]",tokens/s,232.65228656548507,kWh,1.2965490321318309e-05,7.10465502763327e-06,5.986921456200278e-05,7.993935991095436e-05,tokens/kWh,3202427.443566751,MB,3016.822784,9299.296256,0.0,8652.849152,8503.104,s,10,641.11232421875,64.11123242187502,0.005190298699693196,64.11128124999999,64.116784765625,64.1185056640625,64.11988238281249,"[64.1162265625, 64.11091796875, 64.10715234375, 64.11164453125, 64.1140703125, 64.10447265625, 64.1202265625, 64.106796875, 64.1044140625, 64.11640234375]",tokens/s,0.9826671180712498,kWh,0.0007569437074826825,0.00041487128047605436,0.0034860815944185997,0.004657896582377336,tokens/kWh,13525.418369818235,,s,629,649.9943001708987,1.0333772657724936,0.13070199113792613,1.0176091918945311,1.0182221801757814,1.0184349731445312,2.1165633984375,"[1.0176849975585938, 1.0179225463867188, 1.0177464599609376, 1.0181263427734375, 1.01766552734375, 1.0177433471679687, 1.0174985961914063, 1.0180587768554688, 1.0172999877929687, 1.0173552856445311, 1.017459716796875, 1.0184939575195313, 1.017143310546875, 1.0182799072265625, 1.0176931762695312, 1.0179122924804687, 1.0178191528320313, 1.0179092407226562, 1.0176112670898438, 1.0177853393554688, 1.0181171264648436, 1.0180086059570312, 1.0171443481445313, 1.0179840087890626, 1.0173839111328125, 1.0177720336914062, 1.0175538940429687, 1.0174218139648437, 1.0173317260742187, 1.0178887939453125, 1.0179092407226562, 1.0181570434570313, 1.0174976196289063, 1.0177576904296874, 1.017449462890625, 1.017692138671875, 1.0180843505859376, 1.0179747924804687, 1.0176112670898438, 1.0182471923828125, 1.0175887451171874, 1.017427978515625, 1.0171924438476563, 1.0172886962890626, 1.0171688842773436, 1.0173245239257813, 1.0170091552734375, 1.0176368408203125, 1.0171146240234374, 1.0184765625, 1.0177085571289062, 1.017554931640625, 1.0172211303710939, 1.0182522583007811, 1.017290771484375, 1.0179625244140624, 1.0176573486328124, 1.0178734130859375, 1.0176276245117188, 1.0185001220703125, 1.0183004150390624, 1.0180914916992188, 2.120390625, 1.0169886474609375, 1.0174678955078125, 1.0169763793945312, 1.017365478515625, 1.0172262573242188, 1.0176307373046876, 1.017291748046875, 1.0174207763671874, 1.017438232421875, 1.0176419677734374, 1.01682275390625, 1.0177566528320312, 1.0169026489257813, 1.0171187133789064, 1.0169517822265626, 1.0179747924804687, 1.0175006713867187, 1.0174310302734375, 1.0172119140625, 1.018013671875, 1.0172262573242188, 1.0173501586914062, 1.0170921020507813, 1.0172692260742187, 1.0172661743164062, 1.01758154296875, 1.0180413208007812, 1.017764892578125, 1.0175068359375, 1.0178314208984376, 1.0169129028320312, 1.0174238891601564, 1.01768701171875, 1.0177402954101562, 1.0183987426757812, 1.01777099609375, 1.0177843017578125, 1.0177607421875, 1.0173163452148437, 1.0173081665039063, 1.017702392578125, 1.0177484741210938, 1.0175764770507814, 1.01732763671875, 1.018228759765625, 1.018945556640625, 1.0175508422851562, 1.0177587280273437, 1.0177536010742188, 1.0183505859375, 1.0178427124023437, 1.01798095703125, 1.0178457641601562, 1.0182246704101563, 1.0184437866210938, 1.0184017944335937, 1.0174044189453124, 1.01778125, 1.0175907592773437, 1.0179645385742186, 1.0172713012695314, 1.0177116088867189, 2.116588623046875, 1.0172682495117187, 1.017439208984375, 1.0171781005859375, 1.0176378784179687, 1.0172579956054688, 1.0172876586914064, 1.0171351318359374, 1.017439208984375, 1.0174095458984376, 1.0178488159179688, 1.017796630859375, 1.0178846435546876, 1.0169200439453125, 1.0174443359375, 1.0173378295898436, 
1.0173532104492187, 1.0172938232421875, 1.0175784912109376, 1.0172467041015625, 1.0177372436523437, 1.01789697265625, 1.0177136840820313, 1.0170101928710937, 1.01732763671875, 1.0172815551757812, 1.0176215209960937, 1.0174166870117187, 1.0176153564453125, 1.017275390625, 1.0179368896484375, 1.0172160034179687, 1.0177505493164063, 1.0171678466796874, 1.01737060546875, 1.01718017578125, 1.0172098388671875, 1.0171494140625, 1.0173306884765625, 1.0171054077148438, 1.0177413330078124, 1.017786376953125, 1.0182041625976563, 1.0170777587890625, 1.0175405883789062, 1.0175836181640625, 1.0175068359375, 1.017169921875, 1.0173778076171875, 1.0175529174804687, 1.0178037719726563, 1.0177454223632814, 1.0187315063476563, 1.01823486328125, 1.0180054931640625, 1.0178140258789063, 1.0181683349609374, 1.017670654296875, 1.0181611328125, 1.0182564086914063, 1.0183587646484376, 1.0175396118164062, 1.0181222534179688, 2.11649853515625, 1.0170214233398438, 1.0172620849609375, 1.0174719848632812, 1.0172548828125, 1.0174484252929688, 1.017628662109375, 1.0173849487304687, 1.0173992919921875, 1.0177689819335938, 1.0176215209960937, 1.01728564453125, 1.0174402465820314, 1.0171627807617187, 1.0172057495117188, 1.0171904296875, 1.0177679443359375, 1.0173214721679686, 1.0170664672851562, 1.0171207885742188, 1.0171893920898438, 1.0169364624023438, 1.0172098388671875, 1.0170009765625, 1.0171842651367187, 1.01760205078125, 1.0179368896484375, 1.0174985961914063, 1.0172743530273438, 1.0170787963867187, 1.0172272338867188, 1.0170316772460937, 1.0173040771484374, 1.0175529174804687, 1.0179287109375, 1.0178242797851562, 1.0183321533203125, 1.01816015625, 1.0178682861328125, 1.0174054565429687, 1.0174771118164063, 1.0178682861328125, 1.0177188110351563, 1.017955322265625, 1.0186710815429687, 1.0180361938476563, 1.018166259765625, 1.017849853515625, 1.0181478271484374, 1.0177095947265624, 1.017891845703125, 1.0177259521484374, 1.0180966186523437, 1.0177996826171876, 1.0185390014648437, 1.0180700073242188, 1.0183905029296876, 1.0182389526367188, 1.0185400390625, 1.0179686279296876, 1.0179194946289063, 1.0175518798828125, 1.0181058349609375, 2.11787255859375, 1.017354248046875, 1.0174566650390624, 1.017512939453125, 1.0176153564453125, 1.0173112182617188, 1.0177638549804688, 1.0170715942382813, 1.017206787109375, 1.0171371459960938, 1.0176399536132812, 1.0181683349609374, 1.0179246215820312, 1.017238525390625, 1.0177177734375, 1.01699072265625, 1.01747509765625, 1.0175672607421875, 1.0181283569335937, 1.01778125, 1.01810791015625, 1.018156005859375, 1.018429443359375, 1.0175344848632812, 1.0176460571289063, 1.0172333984375, 1.0177321166992188, 1.01758154296875, 1.0172559204101563, 1.0175887451171874, 1.0181652221679687, 1.0181017456054688, 1.0174576416015626, 1.0177464599609376, 1.017849853515625, 1.0172507934570312, 1.0176378784179687, 1.0174627685546875, 1.0175211791992187, 1.017702392578125, 1.0177321166992188, 1.01766552734375, 1.0176245727539062, 1.017238525390625, 1.0175139770507813, 1.0173101806640625, 1.0175518798828125, 1.01707568359375, 1.0175723266601562, 1.0176614379882813, 1.018265625, 1.0177587280273437, 1.0177955932617186, 1.017280517578125, 1.0177669067382813, 1.01758056640625, 1.0177003784179688, 1.0202439575195312, 1.0180403442382813, 1.0180946044921875, 1.0176676025390625, 1.0172211303710939, 1.0179778442382812, 2.115203125, 1.0167890014648437, 1.0172354736328124, 1.0170439453125, 1.0171873168945313, 1.0175703125, 1.0176470947265626, 1.0175744018554687, 1.0177515258789063, 1.0173716430664062, 
1.0174197998046874, 1.0172938232421875, 1.0174003295898437, 1.0167725830078125, 1.0175344848632812, 1.0169354248046876, 1.017470947265625, 1.017322509765625, 1.017417724609375, 1.0173368530273437, 1.0175313720703125, 1.0170009765625, 1.0170460205078125, 1.0174044189453124, 1.017523193359375, 1.0177791748046876, 1.0174197998046874, 1.0170818481445312, 1.0176419677734374, 1.0173101806640625, 1.0176409301757812, 1.0174095458984376, 1.0175293579101563, 1.0173173828125, 1.0182144165039062, 1.0182215576171876, 1.0176747436523437, 1.01743310546875, 1.0186895141601562, 1.0174781494140626, 1.0176266479492186, 1.0174013671875, 1.0175191040039062, 1.0173880615234374, 1.017828369140625, 1.0177105712890624, 1.0176378784179687, 1.0176849975585938, 1.017650146484375, 1.0173193969726562, 1.0175958862304688, 1.017544677734375, 1.01745458984375, 1.0172119140625, 1.017970703125, 1.0179348754882813, 1.017543701171875, 1.0179287109375, 1.0183065795898438, 1.017802734375, 1.0182492065429687, 1.0176123046875, 1.0174044189453124, 2.1172890625, 1.0174003295898437, 1.0177330932617188, 1.0177269897460937, 1.0172897338867188, 1.0172108764648438, 1.0175191040039062, 1.0172661743164062, 1.0171361083984376, 1.0175949096679688, 1.0182318115234374, 1.0174924926757813, 1.0179317626953126, 1.0172047119140626, 1.017650146484375, 1.017218017578125, 1.0178754272460937, 1.017364501953125, 1.017511962890625, 1.0177156982421875, 1.0180003662109376, 1.0175949096679688, 1.01768603515625, 1.0176327514648438, 1.0180106201171875, 1.0180044555664063, 1.0175570068359374, 1.0175354614257812, 1.0178191528320313, 1.0182748413085938, 1.0180044555664063, 1.0177034301757812, 1.0174884033203124, 1.0172713012695314, 1.0173092041015626, 1.01715966796875, 1.0175641479492188, 1.0176747436523437, 1.0178734130859375, 1.0186076049804687, 1.0178191528320313, 1.017248779296875, 1.0178734130859375, 1.0178088989257812, 1.0177566528320312, 1.0173737182617189, 1.0184867553710937, 1.0182072143554688, 1.018197998046875, 1.0183782348632813, 1.0181672973632812, 1.0175877075195312, 1.0179911499023437, 1.0176522216796875, 1.0181212158203126, 1.017575439453125, 1.0183485717773437, 1.01770751953125, 1.0183301391601562, 1.0184693603515624, 1.0186875, 1.0184386596679686, 1.0182625122070312, 2.120072265625, 1.017439208984375, 1.0174781494140626, 1.0172333984375, 1.0177269897460937, 1.0175928344726564, 1.0170654907226562, 1.0174505004882812, 1.0172559204101563, 1.0168995971679688, 1.0167019653320313, 1.0168186645507813, 1.0172682495117187, 1.0168565673828125, 1.0178058471679687, 1.0173900756835939, 1.0174361572265624, 1.0177945556640624, 1.0173880615234374, 1.0177044677734375, 1.0172129516601562, 1.0170511474609376, 1.0175641479492188, 1.0171637573242187, 1.0182041625976563, 1.0174515380859375, 1.0173992919921875, 1.017302001953125, 1.01718017578125, 1.0170521850585938, 1.017697265625, 1.0175518798828125, 1.0177699584960938, 1.0171688842773436, 1.0180577392578125, 1.0177750854492187, 1.0180413208007812, 1.0171555786132813, 1.0174617309570313, 1.0173552856445311, 1.0172088623046875, 1.0174453735351563, 1.0177587280273437, 1.0179358520507813, 1.017807861328125, 1.0180464477539062, 1.01760205078125, 1.01793994140625, 1.01802392578125, 1.0177402954101562, 1.01783349609375, 1.01783447265625, 1.0175006713867187, 1.017565185546875, 1.0181806030273437, 1.01722314453125, 1.0179891357421875, 1.0179143676757811, 1.0177699584960938, 1.018265625, 1.018239990234375, 1.0177269897460937, 1.01764404296875, 2.12052685546875, 1.0178191528320313, 1.0170664672851562, 1.0168914184570312, 
1.016933349609375, 1.0168248291015625, 1.0170194091796876, 1.0169968872070312, 1.0170706176757813, 1.01739111328125, 1.0174085083007813, 1.0172446899414063, 1.01743408203125, 1.0172498168945312, 1.0173173828125, 1.0169630737304687, 1.0172764282226563, 1.017670654296875, 1.0172395629882813, 1.0175570068359374, 1.01774951171875, 1.0171514892578124, 1.0176829223632813, 1.0178232421875, 1.0177474365234376, 1.0177638549804688, 1.0182195434570311, 1.0177802124023438, 1.01798193359375, 1.0181058349609375, 1.0178447265625, 1.0170623779296875, 1.017565185546875, 1.01713818359375, 1.017654296875, 1.0177515258789063, 1.0174115600585938, 1.0176266479492186, 1.01768603515625, 1.0180618286132812, 1.0176409301757812, 1.01743408203125, 1.0178948974609374, 1.0173562622070313, 1.0175979614257813, 1.0175078125, 1.0172713012695314, 1.0172344360351562, 1.0179164428710938, 1.0178744506835937, 1.0179573974609375, 1.01726513671875, 1.017871337890625, 1.0172272338867188, 1.017660400390625, 1.01722314453125, 1.0177474365234376, 1.0172640991210937, 1.0182471923828125, 1.0183259887695313, 1.0176481323242188, 1.0172948608398438, 1.0174535522460937, 2.119248779296875, 1.0171259155273438, 1.0173378295898436, 1.0171361083984376, 1.0176768188476562, 1.0173480834960937, 1.0173060913085938, 1.0175313720703125, 1.0172876586914064, 1.0169313354492187, 1.0169517822265626, 1.0170767211914062, 1.016911865234375, 1.0168883056640625, 1.0176091918945311, 1.0169405517578125, 1.0172006225585937, 1.0173839111328125, 1.0174832763671875, 1.0169149169921874, 1.0174115600585938, 1.017101318359375, 1.0171627807617187, 1.0175191040039062, 1.0177177734375, 1.0175150146484375, 1.0177362060546875, 1.0174299926757813, 1.0178037719726563, 1.0174238891601564, 1.0178887939453125, 1.0173532104492187, 1.0172507934570312, 1.01740234375, 1.0190120849609374, 1.0181201782226563, 1.018503173828125, 1.0183167724609374, 1.0181693725585939, 1.0181580810546875, 1.0182164306640624, 1.0177146606445313, 1.0178099365234374, 1.0184202270507812, 1.0194851684570312, 1.019615234375, 1.0187745361328124, 1.018102783203125, 1.0178682861328125, 1.01852978515625, 1.018186767578125, 1.017744384765625, 1.0179573974609375, 1.0179799194335937, 1.0177484741210938, 1.01732861328125, 1.0178109741210937, 1.0174668579101562, 1.017786376953125, 1.017491455078125, 1.0179041137695313, 1.0180321044921874, 1.0186588134765624]",tokens/s,0.9677007934294523,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-2b,google/gemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback 
(most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc2dc-26f32fc11033541604a4d15d;b000f2f6-832f-43ea-b7fe-6c6eab1a3205) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/gemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-7B,Qwen/Qwen-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-13b-hf,meta-llama/Llama-2-13b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,3001.430016,9299.296256,0.0,8652.849152,8210.185216,s,10,11.016480834960937,1.1016480834960938,0.0021733642538701277,1.1017757568359374,1.103871484375,1.1043791625976562,1.1047853051757812,"[1.1037586669921875, 1.1048868408203125, 1.09813720703125, 1.0993441162109374, 1.10018701171875, 1.0994322509765626, 1.101841796875, 1.101709716796875, 1.1036807861328124, 1.10350244140625]",tokens/s,232.3791089324831,kWh,1.2973552644252778e-05,7.108400666602392e-06,6.117329893860212e-05,8.12552522494573e-05,tokens/kWh,3150565.568537876,MB,3002.59328,9299.296256,0.0,8652.849152,8503.104,s,10,641.81039453125,64.18103945312501,0.013566927048965604,64.180421875,64.20006953125001,64.202245703125,64.20398664062499,"[64.180421875, 64.1733046875, 64.15237109375, 64.17387890625, 64.17909765625, 64.204421875, 64.1840859375, 64.180421875, 64.1828046875, 64.1995859375]",tokens/s,0.9815983121621521,kWh,0.0007575022853910923,0.00041517883106595037,0.0035625019055547965,0.004735183022011839,tokens/kWh,13304.659969242997,,s,629,650.7052465209968,1.0345075461383086,0.13087387967036276,1.0186967163085938,1.0195488891601563,1.0197336059570312,2.11880048828125,"[1.0175508422851562, 1.0181458129882812, 1.0181683349609374, 1.0183413696289063, 1.0181898193359376, 1.0181119995117187, 1.0183444213867188, 1.018092529296875, 1.0177116088867189, 1.0180689697265626, 1.0175825805664063, 1.0178242797851562, 1.0176737060546874, 1.0177935180664062, 1.0175949096679688, 1.017802734375, 1.018124267578125, 1.01833935546875, 1.0179113159179687, 1.0183004150390624, 1.0186444702148438, 1.0186260375976564, 1.0186373291015625, 1.0180352172851563, 1.0180280151367187, 1.0180382690429688, 1.019072509765625, 1.0186967163085938, 1.0191267700195312, 1.0186731567382812, 1.0187960205078126, 1.0190377197265625, 1.0188656616210938, 1.01879296875, 1.0181878051757813, 1.0191165161132814, 1.0185533447265624, 1.0191687622070313, 1.0184427490234376, 1.0186332397460938, 1.0186659545898438, 1.01909912109375, 1.0189649658203126, 1.0186096801757814, 1.0187254028320312, 1.0194135131835937, 1.01941455078125, 1.0202962036132812, 1.019704345703125, 1.0197586059570312, 1.0201170043945313, 1.0203515014648437, 1.0195978393554688, 1.020099609375, 1.0194329833984375, 1.020137451171875, 1.0197483520507813, 1.0200903930664063, 1.01933056640625, 1.01880419921875, 1.019261962890625, 1.0196900024414062, 2.1243330078125, 1.0192138061523437, 1.0183055419921876, 1.0189486083984376, 1.0191370239257813, 1.0183291015625, 1.01895166015625, 1.0190971069335937, 1.019052001953125, 1.0191104125976562, 1.018608642578125, 1.0190632934570312, 1.0185861206054687, 1.0184775390625, 1.0194800415039063, 1.0183649291992187, 1.0192107543945312, 1.0193479614257812, 1.01952001953125, 1.0189434814453124, 
1.01899365234375, 1.019052001953125, 1.0188851318359375, 1.0190868530273438, 1.0185226440429687, 1.0191022338867188, 1.0190540771484375, 1.0195435791015626, 1.0188001098632813, 1.0175877075195312, 1.01774951171875, 1.017987060546875, 1.0180618286132812, 1.018218505859375, 1.0183157958984375, 1.0183516235351562, 1.018313720703125, 1.0178468017578124, 1.0180331420898439, 1.018102783203125, 1.018229736328125, 1.0181058349609375, 1.0183147583007812, 1.018461181640625, 1.0184550170898437, 1.017839599609375, 1.0192803955078125, 1.0181570434570313, 1.01834033203125, 1.018218505859375, 1.0189649658203126, 1.0185697021484375, 1.01888818359375, 1.0182881469726563, 1.0184069213867188, 1.0182307739257812, 1.0182891235351563, 1.0182410278320313, 1.0186854248046875, 1.0180198364257813, 1.0185799560546875, 1.0184140625, 1.0185236206054689, 2.1177333984375, 1.01767578125, 1.0179533081054688, 1.01821337890625, 1.0182543334960938, 1.0181304321289062, 1.0179686279296876, 1.0175242309570312, 1.0182492065429687, 1.0181949462890625, 1.0180792236328124, 1.017970703125, 1.0186107177734376, 1.0181621704101562, 1.0177321166992188, 1.017511962890625, 1.0179542846679688, 1.0180269775390625, 1.0183741455078126, 1.0180157470703124, 1.0179850463867188, 1.0181283569335937, 1.0180433959960937, 1.0179522705078126, 1.0176942138671874, 1.0177587280273437, 1.0178385620117187, 1.0181632080078125, 1.018629150390625, 1.0183321533203125, 1.0186834106445313, 1.01846630859375, 1.018461181640625, 1.018250244140625, 1.01829638671875, 1.01803515625, 1.01867724609375, 1.0185482177734375, 1.0195558471679687, 1.0182451171875, 1.0182113037109375, 1.0181283569335937, 1.0183987426757812, 1.0187427978515624, 1.0183946533203125, 1.0181652221679687, 1.018517578125, 1.0183423461914063, 1.0193663940429687, 1.0184171752929687, 1.0185789184570313, 1.0189977416992186, 1.0188840942382813, 1.0188493041992188, 1.01865673828125, 1.0181990356445312, 1.018498046875, 1.0186240234375, 1.0186342163085937, 1.0180474853515624, 1.0181181640625, 1.0185072631835939, 1.0182072143554688, 2.11782666015625, 1.0176593627929686, 1.0179297485351562, 1.0178068237304687, 1.0177402954101562, 1.0177413330078124, 1.0181898193359376, 1.0177003784179688, 1.0179614868164062, 1.0176676025390625, 1.0177188110351563, 1.0182338256835937, 1.0195701904296874, 1.01842431640625, 1.0179614868164062, 1.0182686767578124, 1.0188646240234376, 1.0179307250976561, 1.0180372314453126, 1.0181652221679687, 1.01809765625, 1.0183987426757812, 1.0185236206054689, 1.0191226806640625, 1.0184785766601563, 1.0190673828125, 1.0196029663085937, 1.0190448608398437, 1.0190960693359374, 1.0191595458984375, 1.0192824096679687, 1.0184365844726562, 1.0186434326171876, 1.0191533813476563, 1.0193930053710938, 1.0196664428710938, 1.019341796875, 1.01850830078125, 1.0184909057617189, 1.018365966796875, 1.0184058837890626, 1.019093994140625, 1.0190244140625, 1.0187202758789062, 1.0190602416992187, 1.0189383544921875, 1.0194821166992187, 1.0181652221679687, 1.0186793212890626, 1.0183731079101563, 1.0187816772460938, 1.01879296875, 1.0193489990234375, 1.018956787109375, 1.019167724609375, 1.0183106689453125, 1.0189916381835937, 1.0191524047851563, 1.0189639892578124, 1.0186598510742189, 1.0192609252929687, 1.0185523071289062, 1.0191390991210938, 2.1198623046875, 1.0184703979492187, 1.018693603515625, 1.0184253540039063, 1.018598388671875, 1.017944091796875, 1.0177741088867187, 1.0175641479492188, 1.0179666748046876, 1.017946044921875, 1.0179491577148438, 1.0177464599609376, 1.0178068237304687, 1.0175989990234375, 
1.0180106201171875, 1.0180372314453126, 1.0184130859375, 1.0181887817382813, 1.0183567504882813, 1.0175958862304688, 1.0183536376953124, 1.0183270263671875, 1.0187335815429688, 1.0194104614257813, 1.01911962890625, 1.0193428344726563, 1.0188841552734376, 1.0194349975585937, 1.0187448120117188, 1.018265625, 1.018481689453125, 1.0187151489257813, 1.0190418090820312, 1.0186414184570312, 1.0186311645507813, 1.019009033203125, 1.0189557495117187, 1.01874072265625, 1.0184427490234376, 1.01905615234375, 1.0199142456054688, 1.0191533813476563, 1.0189649658203126, 1.019109375, 1.0186588134765624, 1.0195548095703124, 1.0190469360351562, 1.0197084350585937, 1.0191718139648438, 1.019072509765625, 1.0192210083007813, 1.0191072998046875, 1.0190694580078126, 1.0184151000976562, 1.0187469482421876, 1.0190745239257812, 1.0195794067382813, 1.0200555419921875, 1.0184437866210938, 1.0190847778320313, 1.01943505859375, 1.0188318481445313, 1.0186168212890625, 2.11917919921875, 1.0183997192382812, 1.02013232421875, 1.0196019287109375, 1.018566650390625, 1.018514404296875, 1.0180843505859376, 1.0178980102539064, 1.018302490234375, 1.0179512329101563, 1.0179932250976562, 1.017970703125, 1.0190079956054687, 1.0196439208984376, 1.0198763427734374, 1.0188820190429688, 1.0180269775390625, 1.0179645385742186, 1.0182420654296875, 1.0186547241210937, 1.019009033203125, 1.0187325439453125, 1.0195947265625, 1.01844482421875, 1.0194401245117188, 1.0188646240234376, 1.0188635864257813, 1.0188073120117187, 1.0185891723632812, 1.0188493041992188, 1.0184304809570313, 1.0185359497070312, 1.0193643798828125, 1.0190960693359374, 1.0195015869140625, 1.0195343627929687, 1.0197268676757814, 1.0195292358398438, 1.0212003784179688, 1.0190858154296876, 1.0195117797851563, 1.0193694458007811, 1.0198139038085938, 1.0197166137695313, 1.0198231201171875, 1.0193295288085937, 1.0196602783203126, 1.01909912109375, 1.0193510131835937, 1.0191390991210938, 1.019315185546875, 1.01920361328125, 1.0198876342773437, 1.019504638671875, 1.0197114868164063, 1.0195404663085939, 1.0196654052734375, 1.019725830078125, 1.0191769409179687, 1.019283447265625, 1.0189865112304688, 1.0192742309570313, 1.0197012329101562, 2.120995849609375, 1.0189475708007814, 1.0187919311523437, 1.0181539916992188, 1.0187530517578125, 1.019020263671875, 1.0189700927734375, 1.0192455444335937, 1.0192025756835938, 1.018956787109375, 1.0191503295898436, 1.0182103271484375, 1.0184130859375, 1.01810791015625, 1.0181693725585939, 1.018672119140625, 1.0189906005859375, 1.0192138061523437, 1.0188738403320312, 1.0185830688476563, 1.0189445190429687, 1.0188543701171875, 1.0183229370117188, 1.0181427001953125, 1.0185963745117188, 1.0190100708007812, 1.0193192749023436, 1.0195661010742187, 1.0191524047851563, 1.0190028686523438, 1.0189066162109375, 1.018893310546875, 1.0182830200195312, 1.01808740234375, 1.01798095703125, 1.018049560546875, 1.0186854248046875, 1.0191216430664063, 1.0186157836914063, 1.019087890625, 1.019009033203125, 1.018883056640625, 1.0186516723632812, 1.018503173828125, 1.018767333984375, 1.0181427001953125, 1.0188851318359375, 1.0190079956054687, 1.0191380615234376, 1.0195916748046876, 1.018988525390625, 1.0186444702148438, 1.0186322021484375, 1.0188236694335937, 1.0185963745117188, 1.0187489013671875, 1.0191278076171875, 1.0186588134765624, 1.0191104125976562, 1.0196490478515625, 1.019409423828125, 1.0184796142578125, 1.0184622192382813, 2.120321044921875, 1.018513427734375, 1.0190059814453125, 1.0185799560546875, 1.0189691162109376, 1.0192957153320312, 
1.0182420654296875, 1.0181099243164062, 1.018471435546875, 1.0179880981445313, 1.01793994140625, 1.0180792236328124, 1.0191585083007813, 1.0183577880859376, 1.0186967163085938, 1.019025390625, 1.018302490234375, 1.0182195434570311, 1.0180986938476562, 1.0181119995117187, 1.018119140625, 1.0185779418945313, 1.0192568359375, 1.01869775390625, 1.0190448608398437, 1.0191011962890626, 1.0185687255859375, 1.0183587646484376, 1.0186629028320313, 1.018440673828125, 1.0184990844726562, 1.0189854736328126, 1.0192864990234376, 1.0188861694335938, 1.0186168212890625, 1.0193828125, 1.019452392578125, 1.018482666015625, 1.0182092895507813, 1.0185973510742188, 1.0184847412109375, 1.0187908935546874, 1.0185379638671874, 1.0185676879882812, 1.0197432250976564, 1.0195404663085939, 1.0186229858398437, 1.0183670043945312, 1.01861376953125, 1.0180689697265626, 1.019240478515625, 1.0194298706054687, 1.0187018432617188, 1.0183311157226562, 1.0196664428710938, 1.01880322265625, 1.019241455078125, 1.01879296875, 1.0191953735351562, 1.0192783203125, 1.0192066650390625, 1.0193878784179689, 1.0188646240234376, 2.12357421875, 1.019025390625, 1.0193284912109375, 1.0185912475585936, 1.0186680297851562, 1.0185154418945312, 1.0190069580078125, 1.0191072998046875, 1.0187960205078126, 1.0190796508789062, 1.0191022338867188, 1.01871923828125, 1.0191728515625, 1.0182952880859375, 1.0185615234375, 1.0188533935546875, 1.01926806640625, 1.0197380981445312, 1.0183946533203125, 1.019536376953125, 1.0187807006835938, 1.0187202758789062, 1.0181488647460937, 1.01829736328125, 1.0191585083007813, 1.0188114013671874, 1.0189260864257812, 1.0182645874023437, 1.0184970092773438, 1.01850830078125, 1.0195537719726562, 1.0186332397460938, 1.018650634765625, 1.018513427734375, 1.0189107055664062, 1.0193633422851562, 1.0195169067382812, 1.0182993774414062, 1.0184263916015626, 1.019188232421875, 1.0184898681640624, 1.0184581298828126, 1.018377197265625, 1.0180526123046876, 1.018682373046875, 1.019114501953125, 1.01926708984375, 1.018660888671875, 1.0182830200195312, 1.0192527465820314, 1.0191104125976562, 1.0186793212890626, 1.0189076538085937, 1.0181212158203126, 1.0184489135742187, 1.0176378784179687, 1.0188114013671874, 1.01772900390625, 1.0189046020507813, 1.0190069580078125, 1.0186148071289063, 1.0183670043945312, 1.019093017578125, 2.122827880859375, 1.0188871459960938, 1.0193141479492187, 1.0191267700195312, 1.0201989135742187, 1.018756103515625, 1.0183936157226563, 1.0180618286132812, 1.0186639404296876, 1.018534912109375, 1.0186199340820312, 1.0191769409179687, 1.0182215576171876, 1.0179891357421875, 1.0194401245117188, 1.019241455078125, 1.019087890625, 1.0191400756835938, 1.0193930053710938, 1.0193981323242187, 1.0194821166992187, 1.0188114013671874, 1.0183598022460938, 1.0190151977539061, 1.0194656982421875, 1.0182840576171874, 1.0186342163085937, 1.0182359008789061, 1.0182963256835937, 1.018186767578125, 1.0188738403320312, 1.0185728149414062, 1.0182052001953126, 1.018392578125, 1.019826171875, 1.018545166015625, 1.0191267700195312, 1.0181467895507812, 1.0188943481445312, 1.0193899536132813, 1.0195814208984375, 1.0194718627929686, 1.0192506713867187, 1.0194667358398437, 1.0197903442382812, 1.0192322387695312, 1.0195476684570313, 1.0191769409179687, 1.0188554077148437, 1.0196582641601561, 1.0197176513671875, 1.019526123046875, 1.0196561889648437, 1.0197647094726563, 1.0200924072265625, 1.0190786743164062, 1.019673583984375, 1.0185328369140625, 1.0187786254882814, 1.0190172119140626, 1.019193359375, 1.0191861572265626, 
1.0196377563476562]",tokens/s,0.966643504663527,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-4B,Qwen/Qwen1.5-4B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,3203.223552,5128.060928,0.0,4481.613824,4276.256768,s,10,3.1983958740234373,0.31983958740234375,0.001538131138588488,0.31976911926269536,0.32137343139648433,0.3217927795410156,0.3221282580566406,"[0.31961990356445313, 0.3212218933105469, 0.3187176513671875, 0.3199183349609375, 0.3183829040527344, 0.3169226989746094, 0.31905938720703125, 0.32106072998046875, 0.32128024291992185, 0.32221212768554686]",tokens/s,800.4012326277909,kWh,3.750147018581629e-06,2.054515901244258e-06,1.6957018773937002e-05,2.276168169376289e-05,tokens/kWh,11246972.14574214,MB,3203.223552,5128.060928,0.0,4481.613824,4465.662976,s,10,186.97800781249998,18.69780078125,0.017296888320766722,18.693263671875002,18.720369140625,18.729755859375,18.737265234375002,"[18.718283203125, 18.689103515625, 18.695333984375, 18.678984375, 18.739142578125, 18.694673828125, 18.691853515625, 18.6855234375, 18.701947265625, 18.683162109375]",tokens/s,3.369380214125283,kWh,0.00022053018352016812,0.00012086672241809653,0.0009756264419588673,0.001317023347897132,tokens/kWh,47835.14286257035,,s,629,189.55983416748046,0.3013669859578386,0.038019298519587436,0.29649102783203124,0.2979925964355469,0.29949728393554687,0.6154752392578126,"[0.29722113037109377, 0.2962104187011719, 0.2994401550292969, 0.3005357666015625, 0.30041702270507814, 0.2999255065917969, 0.30030029296875, 0.3005870056152344, 0.30005453491210937, 0.3003166809082031, 0.2982318115234375, 0.29764404296875, 0.29678591918945313, 0.2977320861816406, 0.2972508239746094, 0.2988697509765625, 0.29594931030273436, 0.29598825073242185, 0.29595953369140626, 0.2961408081054687, 0.29663333129882813, 0.2959800415039063, 0.2984028015136719, 0.29736856079101565, 0.29788876342773435, 0.2962769775390625, 0.2963660888671875, 0.2973767700195312, 0.2967070617675781, 0.2959728698730469, 0.2965258178710938, 0.29611212158203126, 0.2976409606933594, 0.2969599914550781, 0.2967398376464844, 0.2962391052246094, 0.29641522216796873, 0.2959902648925781, 0.29629336547851565, 0.29671832275390625, 0.29604147338867187, 0.2959872131347656, 0.2960855102539062, 0.2966855773925781, 0.29605990600585935, 0.29604147338867187, 0.29675726318359374, 0.29607012939453126, 0.2964500427246094, 0.29650942993164064, 0.2962022399902344, 0.2960516967773438, 0.29594830322265625, 0.296783935546875, 0.296870849609375, 0.29710232543945314, 0.29665484619140625, 0.29788363647460936, 0.29600152587890627, 0.29640191650390624, 0.29616229248046877, 0.2960711669921875, 0.6185267333984374, 0.29601278686523436, 0.29589199829101565, 0.29706646728515623, 0.29594418334960937, 0.29600460815429686, 0.29592166137695314, 0.2975078430175781, 0.2961551208496094, 0.29602200317382815, 0.2963240966796875, 0.2960445556640625, 0.2958970947265625, 0.2962135009765625, 0.29608447265625, 0.29607730102539065, 0.29639883422851565, 0.29659750366210935, 
0.29723443603515626, 0.29676031494140626, 0.2970972290039062, 0.296911865234375, 0.29686373901367186, 0.29711358642578123, 0.29724774169921875, 0.29758157348632813, 0.29762457275390625, 0.2963056640625, 0.29703271484375, 0.29604864501953126, 0.2962616271972656, 0.296153076171875, 0.2962769775390625, 0.2961919860839844, 0.2968739929199219, 0.29633843994140624, 0.2965504150390625, 0.29617355346679686, 0.29583154296875, 0.2962042846679688, 0.2970060729980469, 0.2965278625488281, 0.29644491577148435, 0.29606298828125, 0.29650225830078125, 0.2965186462402344, 0.3018721313476562, 0.29692620849609375, 0.2963681640625, 0.29674798583984374, 0.296806396484375, 0.29622067260742185, 0.2960076904296875, 0.2968924255371094, 0.2960343017578125, 0.2965831604003906, 0.2970480651855469, 0.29702047729492187, 0.296217529296875, 0.29626266479492186, 0.29842022705078125, 0.29761843872070315, 0.29686373901367186, 0.6160445556640625, 0.2960199890136719, 0.2965903015136719, 0.2964684753417969, 0.29644390869140624, 0.2965688171386719, 0.2976983032226562, 0.29753759765625, 0.29599737548828126, 0.29653094482421877, 0.2970838928222656, 0.2961479797363281, 0.29596878051757813, 0.29587353515625, 0.2960445556640625, 0.2966005859375, 0.2975467529296875, 0.2962995300292969, 0.2958847961425781, 0.2957864990234375, 0.2960732116699219, 0.295973876953125, 0.2962944030761719, 0.29851339721679687, 0.295984130859375, 0.29607730102539065, 0.2961203308105469, 0.2960425109863281, 0.29593295288085936, 0.29733270263671874, 0.29703067016601564, 0.29734808349609376, 0.29631283569335937, 0.296816650390625, 0.29681048583984376, 0.296489990234375, 0.29625653076171876, 0.29661590576171876, 0.29632000732421876, 0.2966937561035156, 0.2978652038574219, 0.2965299072265625, 0.2963240966796875, 0.29751806640625, 0.29830859375, 0.297481201171875, 0.29719039916992185, 0.29953536987304685, 0.29737368774414064, 0.29692108154296876, 0.296585205078125, 0.296052734375, 0.29637939453125, 0.29625753784179687, 0.29655654907226564, 0.29799423217773435, 0.2968268737792969, 0.29685043334960937, 0.29692108154296876, 0.29714944458007814, 0.29722930908203127, 0.2967623596191406, 0.297270263671875, 0.6168934326171875, 0.29688626098632814, 0.296279052734375, 0.29670297241210936, 0.2962923583984375, 0.29618795776367185, 0.2966793518066406, 0.2960650329589844, 0.29615924072265626, 0.29594418334960937, 0.2973972473144531, 0.29612338256835935, 0.2971064453125, 0.2982297668457031, 0.29685043334960937, 0.2960855102539062, 0.296089599609375, 0.29661489868164065, 0.2965350341796875, 0.29597695922851563, 0.2961705017089844, 0.29597695922851563, 0.29592276000976564, 0.29643975830078123, 0.29586328125, 0.29596466064453125, 0.2959923095703125, 0.2958981018066406, 0.29643777465820315, 0.2959923095703125, 0.29621759033203127, 0.2963650512695313, 0.29609677124023437, 0.29788980102539064, 0.2977914733886719, 0.29713613891601565, 0.2964951171875, 0.29727642822265626, 0.2966794128417969, 0.2964142150878906, 0.2965821533203125, 0.2960855102539062, 0.2957701416015625, 0.2966097717285156, 0.29938388061523435, 0.29615609741210935, 0.29603640747070314, 0.2962728271484375, 0.29596978759765624, 0.2960455627441406, 0.296300537109375, 0.29595040893554686, 0.29649197387695314, 0.29635379028320313, 0.296342529296875, 0.29667330932617186, 0.2967008972167969, 0.29659442138671877, 0.29601690673828124, 0.29651455688476563, 0.29645925903320314, 0.2973665466308594, 0.29616226196289064, 0.615267333984375, 0.29617767333984374, 0.29771673583984376, 0.29707980346679685, 0.298039306640625, 
0.297238525390625, 0.2971115417480469, 0.29701837158203126, 0.29682891845703124, 0.2972641296386719, 0.2971668395996094, 0.29752627563476564, 0.2972252197265625, 0.29701632690429686, 0.29692620849609375, 0.29619302368164063, 0.2964234619140625, 0.2987468566894531, 0.296131591796875, 0.2958981018066406, 0.29623501586914064, 0.29586944580078123, 0.295846923828125, 0.29605377197265625, 0.29625753784179687, 0.29595443725585935, 0.29606195068359376, 0.29590631103515624, 0.29597491455078123, 0.29599948120117187, 0.296310791015625, 0.2964664306640625, 0.29622885131835935, 0.2963630065917969, 0.29594317626953126, 0.2961397705078125, 0.29812017822265624, 0.3001466979980469, 0.3003924560546875, 0.3000289306640625, 0.29983538818359373, 0.29985791015625, 0.29986712646484376, 0.29981594848632814, 0.30056344604492186, 0.29986407470703125, 0.2999111633300781, 0.29782833862304686, 0.2960773315429687, 0.2960137939453125, 0.29763687133789063, 0.2979921875, 0.2981877746582031, 0.2967510986328125, 0.29627288818359376, 0.29777716064453125, 0.2970634155273438, 0.30057470703125, 0.2982738037109375, 0.2991800231933594, 0.30042315673828124, 0.2963568725585938, 0.2961418151855469, 0.6139617309570312, 0.29619406127929687, 0.29618585205078124, 0.29586431884765624, 0.296079345703125, 0.29643060302734375, 0.2977740783691406, 0.2977832946777344, 0.2964633483886719, 0.29604147338867187, 0.2960609130859375, 0.29627084350585936, 0.29602822875976564, 0.2961315307617188, 0.29630160522460935, 0.29599432373046874, 0.2961469421386719, 0.296531982421875, 0.2960977783203125, 0.2960373840332031, 0.2965381164550781, 0.2960506896972656, 0.29609063720703127, 0.2962063293457031, 0.2960169677734375, 0.29643975830078123, 0.2966312866210937, 0.2963077087402344, 0.2960865173339844, 0.2961162109375, 0.2960281677246094, 0.2961766357421875, 0.296352783203125, 0.2976133117675781, 0.29756414794921876, 0.29727435302734373, 0.29644595336914065, 0.2974146423339844, 0.30141543579101565, 0.2976788330078125, 0.2980526123046875, 0.2980894775390625, 0.29788058471679685, 0.2963497009277344, 0.29696920776367186, 0.29795635986328123, 0.29711566162109376, 0.29751296997070314, 0.29756314086914065, 0.2974320678710938, 0.29744332885742186, 0.29654324340820315, 0.29747711181640624, 0.2984744873046875, 0.29649203491210935, 0.2962442321777344, 0.2966384582519531, 0.2963189697265625, 0.29589913940429685, 0.2962176208496094, 0.2958837585449219, 0.29590936279296876, 0.29634048461914064, 0.6155560913085938, 0.29629541015625, 0.2960916442871094, 0.2964561767578125, 0.29645208740234374, 0.2960373840332031, 0.2963875732421875, 0.2963620300292969, 0.2962861633300781, 0.29651251220703123, 0.29747915649414064, 0.2981519470214844, 0.29662823486328127, 0.2976133117675781, 0.29637530517578126, 0.29608038330078124, 0.29605682373046877, 0.2960496520996094, 0.29625140380859377, 0.29587966918945313, 0.2968821716308594, 0.29635174560546873, 0.29616537475585936, 0.3003904113769531, 0.2970214538574219, 0.2966312866210937, 0.29685556030273436, 0.29636813354492186, 0.29714739990234373, 0.29626266479492186, 0.29595135498046876, 0.2962196350097656, 0.29621759033203127, 0.29663333129882813, 0.2961408081054687, 0.2965729370117188, 0.2961131591796875, 0.2963865661621094, 0.296479736328125, 0.296900634765625, 0.2964776611328125, 0.296848388671875, 0.29701937866210937, 0.29931622314453127, 0.29786627197265625, 0.2962636413574219, 0.29623806762695315, 0.2966210632324219, 0.2961336364746094, 0.2963711853027344, 0.29625140380859377, 0.2967357482910156, 0.29616845703125, 0.2963599853515625, 
0.29687493896484374, 0.296300537109375, 0.2967142333984375, 0.29815499877929685, 0.29702349853515625, 0.29747610473632813, 0.296627197265625, 0.29662515258789063, 0.2977822570800781, 0.6183147583007812, 0.29672344970703124, 0.296764404296875, 0.29664154052734376, 0.2967080993652344, 0.29716583251953127, 0.29694155883789064, 0.2967244873046875, 0.2972221374511719, 0.2974392395019531, 0.2972866516113281, 0.2969518127441406, 0.2982625427246094, 0.296279052734375, 0.29735015869140624, 0.2961131591796875, 0.2980966491699219, 0.29623806762695315, 0.29628826904296873, 0.2956605529785156, 0.29607012939453126, 0.2962001953125, 0.29612442016601564, 0.29620736694335936, 0.29724774169921875, 0.2960086975097656, 0.297548828125, 0.2967602844238281, 0.29649102783203124, 0.2963619384765625, 0.2962708740234375, 0.29639984130859376, 0.2968350830078125, 0.2963292236328125, 0.296099853515625, 0.29622784423828125, 0.29627093505859375, 0.296197021484375, 0.2974996337890625, 0.2962104187011719, 0.2960639953613281, 0.29599948120117187, 0.2959902648925781, 0.2965739440917969, 0.2961817626953125, 0.2968320007324219, 0.29708084106445315, 0.2968320007324219, 0.29710540771484373, 0.2960558166503906, 0.2963138427734375, 0.29637326049804685, 0.29592166137695314, 0.2962083740234375, 0.29609573364257813, 0.2962114562988281, 0.2961203308105469, 0.29699481201171873, 0.29659442138671877, 0.29610906982421875, 0.29717401123046877, 0.29671218872070315, 0.2965350341796875, 0.61785498046875, 0.29675213623046875, 0.29679409790039063, 0.29726925659179687, 0.29639578247070314, 0.29744537353515627, 0.2977556457519531, 0.29687704467773435, 0.2974320678710938, 0.297416748046875, 0.29766244506835937, 0.29807000732421873, 0.2972119140625, 0.2973388671875, 0.2967439270019531, 0.2972999572753906, 0.2976030578613281, 0.2986280822753906, 0.29623602294921875, 0.29600152587890627, 0.2964695739746094, 0.2963936767578125, 0.2961868896484375, 0.29702349853515625, 0.29616024780273437, 0.29621148681640624, 0.2964879150390625, 0.29709210205078124, 0.2963056640625, 0.2973767700195312, 0.2969722900390625, 0.29783346557617185, 0.2965718994140625, 0.2977576904296875, 0.2961418151855469, 0.2967091064453125, 0.2973644714355469, 0.29671218872070315, 0.29666201782226564, 0.296838134765625, 0.2965801086425781, 0.2962391052246094, 0.296089599609375, 0.2963804016113281, 0.29701119995117187, 0.2961469421386719, 0.29617767333984374, 0.296384521484375, 0.296369140625, 0.29728460693359376, 0.2963015747070312, 0.2967838745117187, 0.29621554565429686, 0.2963097534179687, 0.29686373901367186, 0.2963947448730469, 0.2962995300292969, 0.29706649780273436, 0.2967142333984375, 0.29672857666015623, 0.2966722412109375, 0.2982359008789062, 0.2979543151855469, 0.6189783325195313, 0.2962237548828125, 0.2969938049316406, 0.29632614135742186, 0.2961541748046875, 0.2965380554199219, 0.2963343505859375, 0.29643777465820315, 0.29634866333007814, 0.2962872314453125, 0.2963630065917969, 0.29629644775390623, 0.29665484619140625, 0.296310791015625, 0.29620120239257813, 0.29598513793945314, 0.29617767333984374, 0.2959667053222656, 0.2962749328613281, 0.2967930908203125, 0.29673779296875, 0.29692825317382815, 0.29633331298828125, 0.29688934326171873, 0.2962821044921875, 0.29612954711914063, 0.2987386779785156, 0.2962083740234375, 0.296374267578125, 0.2962135009765625, 0.29653402709960935, 0.2968299560546875, 0.2963097534179687, 0.2981970520019531, 0.2961192321777344, 0.2962135009765625, 0.2961069946289063, 0.295920654296875, 0.2961459655761719, 0.29606500244140627, 0.29621453857421876, 
0.29600564575195315, 0.29583566284179685, 0.2993377380371094, 0.29691802978515625, 0.2961561584472656, 0.2958428039550781, 0.2965329895019531, 0.29672549438476564, 0.29595547485351564, 0.2960772705078125, 0.29593496704101563, 0.2966548767089844, 0.2969466552734375, 0.2978447265625, 0.296627197265625, 0.29650433349609373, 0.2975867004394531, 0.2982461853027344, 0.29639166259765626, 0.29643161010742186, 0.29704702758789064, 0.29672344970703124]",tokens/s,3.3182134958203435,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-410m,EleutherAI/pythia-410m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1390.702592,1340.604416,0.0,694.157312,598.499328,s,10,0.6226632919311523,0.062266329193115234,0.002317832326909016,0.06196879959106445,0.06357897109985351,0.06612861976623535,0.06816833869934083,"[0.06867826843261719, 0.06029948806762695, 0.06301238250732422, 0.06221964645385742, 0.06201440048217773, 0.06192319869995117, 0.06178224182128906, 0.06027980804443359, 0.06031568145751953, 0.06213817596435547]",tokens/s,4111.3713192571795,kWh,7.181983443623039e-07,3.9353970671072603e-07,2.1138383914060147e-06,3.2255764424790445e-06,tokens/kWh,79365658.99621,MB,1391.030272,1340.604416,0.0,694.157312,659.032576,s,10,37.91430688476563,3.791430688476563,0.033463632163375424,3.808232421875,3.8238944580078122,3.8254893432617187,3.8267652514648436,"[3.8235400390625, 3.807294921875, 3.809169921875, 3.815871337890625, 3.827084228515625, 3.813744140625, 3.745643798828125, 3.73081494140625, 3.753345703125, 3.7877978515625]",tokens/s,16.616418754924958,kWh,4.459946840148583e-05,2.4442959093412353e-05,0.00012145201046119378,0.00019049443795609197,tokens/kWh,330718.3174267859,,s,629,38.408798217773445,0.06106327220631707,0.007411049818688859,0.06058700942993164,0.06105620422363281,0.061356236267089845,0.1198044711303711,"[0.05935715103149414, 0.059200511932373044, 0.06025625610351563, 0.06658048248291015, 0.06115430450439453, 0.06075187301635742, 0.06081740951538086, 0.06116556930541992, 0.06187519836425781, 0.060846080780029295, 0.0618015022277832, 0.061259742736816405, 0.06081024169921875, 0.06082559967041016, 0.06123110580444336, 0.06096486282348633, 0.06082559967041016, 0.06090752029418945, 0.06103142547607422, 0.05951590347290039, 0.05919027328491211, 0.060930049896240235, 0.06138163375854492, 0.06130278396606445, 0.0615464973449707, 0.061396991729736325, 0.06103859329223633, 0.060919807434082034, 0.06081228637695312, 0.06087680053710937, 0.06095257568359375, 0.06107648086547852, 0.06047641754150391, 0.05975551986694336, 0.060652542114257815, 0.060800033569335936, 0.06089315032958984, 0.060319744110107425, 0.06068940734863281, 0.06150451278686524, 0.06105190277099609, 0.061037567138671874, 0.059990016937255856, 0.060765182495117184, 0.06077849578857422, 0.06057881546020508, 0.060082176208496096, 0.0589219856262207, 0.059184127807617185, 0.05976166534423828, 0.060783649444580076, 0.06094025421142578, 0.06090854263305664, 0.060641281127929686, 0.05964799880981445, 
0.05914112091064453, 0.05936537551879883, 0.06067507171630859, 0.06081740951538086, 0.060158977508544924, 0.06099660873413086, 0.05982624053955078, 0.11980588531494141, 0.06112768173217773, 0.06088294219970703, 0.060576766967773435, 0.06073548889160156, 0.06021529769897461, 0.06116966247558594, 0.06180044937133789, 0.06070483016967773, 0.05943494415283203, 0.059440128326416014, 0.060862464904785155, 0.060211200714111325, 0.05941452789306641, 0.06035456085205078, 0.060521472930908204, 0.06083174514770508, 0.06059929656982422, 0.060902400970458986, 0.06084096145629883, 0.06026649475097656, 0.05905920028686523, 0.059622398376464845, 0.06106521606445312, 0.06076313781738281, 0.05981081771850586, 0.05929369735717773, 0.059216896057128904, 0.061050880432128904, 0.06076620864868164, 0.059224063873291016, 0.06019583892822265, 0.06055321502685547, 0.060796928405761716, 0.061064193725585934, 0.06083071899414062, 0.060793857574462894, 0.05939507293701172, 0.058877952575683595, 0.05913702392578125, 0.05904793548583984, 0.05907865524291992, 0.06065049743652344, 0.06088294219970703, 0.06102732849121094, 0.06089215850830078, 0.06101504135131836, 0.060728321075439455, 0.06069760131835938, 0.060909599304199216, 0.06107542419433594, 0.060761089324951174, 0.06076416015625, 0.060911617279052734, 0.06071091079711914, 0.06042009735107422, 0.06087372970581055, 0.06067302322387695, 0.06060851287841797, 0.060984321594238285, 0.06090956878662109, 0.06082867050170898, 0.060943359375, 0.12434432220458984, 0.059063297271728515, 0.05924863815307617, 0.059273216247558595, 0.059202560424804686, 0.0593807373046875, 0.05934284973144531, 0.06072115325927734, 0.060693504333496094, 0.06077439880371094, 0.0608798713684082, 0.05987430572509766, 0.05933158493041992, 0.05966438293457031, 0.05923430252075195, 0.06037811279296875, 0.060916736602783204, 0.06084505462646484, 0.060793857574462894, 0.06016204833984375, 0.06031257629394531, 0.060813312530517576, 0.06093008041381836, 0.06087472152709961, 0.06037811279296875, 0.06069964981079102, 0.06112972640991211, 0.06072524642944336, 0.060851200103759766, 0.06091468811035156, 0.0610334701538086, 0.0608901138305664, 0.06094847869873047, 0.06083071899414062, 0.060846080780029295, 0.06088806533813477, 0.06078566360473633, 0.06029625701904297, 0.06034220886230469, 0.06101094436645508, 0.06097919845581055, 0.061350910186767575, 0.05945548629760742, 0.05929779052734375, 0.059101184844970706, 0.059858943939208986, 0.05947903823852539, 0.05919948959350586, 0.059469825744628904, 0.06209024047851563, 0.06109183883666992, 0.06083071899414062, 0.06089318466186523, 0.06092083358764649, 0.06089113616943359, 0.06088601684570313, 0.06026956939697266, 0.06099763107299805, 0.06088499069213867, 0.06075494384765625, 0.06154547119140625, 0.0613570556640625, 0.06097919845581055, 0.12348928070068359, 0.060916736602783204, 0.060777473449707034, 0.0607303695678711, 0.06054297637939453, 0.06096694564819336, 0.06082352066040039, 0.060835838317871094, 0.06073855972290039, 0.060864513397216796, 0.05947903823852539, 0.059015167236328124, 0.059270145416259766, 0.05909404754638672, 0.05935817718505859, 0.05992550277709961, 0.06107955169677735, 0.061110271453857425, 0.06092083358764649, 0.06084505462646484, 0.06058803176879883, 0.06083891296386719, 0.06079487991333008, 0.060827648162841794, 0.060832767486572265, 0.06084403228759765, 0.06105702209472656, 0.06056345748901367, 0.06058598327636719, 0.06076006317138672, 0.06095462417602539, 0.0609617919921875, 0.06127206420898437, 0.05955788803100586, 0.06099967956542969, 
0.06095667266845703, 0.06068633651733398, 0.060887039184570314, 0.06139187240600586, 0.061454334259033204, 0.06054912185668945, 0.06096281433105469, 0.06029414367675781, 0.06135500717163086, 0.062328830718994144, 0.06116147232055664, 0.0603770866394043, 0.05916262435913086, 0.059469825744628904, 0.0593438720703125, 0.05933055877685547, 0.05895884704589844, 0.05933670425415039, 0.06046003341674805, 0.06081024169921875, 0.06076620864868164, 0.061451263427734375, 0.06086348724365234, 0.06087168121337891, 0.06075699234008789, 0.060631038665771485, 0.0605296630859375, 0.06076006317138672, 0.12325580596923828, 0.060826625823974606, 0.06067612838745117, 0.06101910400390625, 0.06079283142089844, 0.06072115325927734, 0.06085529708862305, 0.06092902374267578, 0.0608983039855957, 0.060955646514892575, 0.06091059112548828, 0.060889087677001956, 0.06080716705322266, 0.059873279571533204, 0.05963673782348633, 0.06089932632446289, 0.061224960327148435, 0.06137139129638672, 0.06110105514526367, 0.06112870407104492, 0.06075699234008789, 0.0609249267578125, 0.0607457275390625, 0.06121267318725586, 0.060843006134033206, 0.06098124694824219, 0.061001728057861325, 0.06096588897705078, 0.06096895980834961, 0.06099558258056641, 0.06101708984375, 0.06083174514770508, 0.06094643020629883, 0.06118297576904297, 0.06099148941040039, 0.06102937698364258, 0.06078771209716797, 0.06082457733154297, 0.059865089416503904, 0.060216320037841796, 0.059087871551513675, 0.05941452789306641, 0.06077439880371094, 0.06028083038330078, 0.06068121719360352, 0.06069760131835938, 0.06096691131591797, 0.06012416076660156, 0.06075494384765625, 0.06013849639892578, 0.06086963272094727, 0.060609535217285154, 0.06098739242553711, 0.06088294219970703, 0.06077337646484375, 0.06087782287597656, 0.060805118560791016, 0.06075801467895508, 0.060829696655273435, 0.060837886810302735, 0.060200958251953124, 0.060911617279052734, 0.06101094436645508, 0.1231247329711914, 0.060744705200195315, 0.06079897689819336, 0.0600002555847168, 0.059115520477294924, 0.0603351058959961, 0.06076620864868164, 0.060727294921875, 0.060744705200195315, 0.060527614593505856, 0.06128844833374023, 0.06094438552856445, 0.060677120208740234, 0.06176768112182617, 0.062110721588134764, 0.0609249267578125, 0.060832767486572265, 0.060432384490966794, 0.06098636627197265, 0.06112972640991211, 0.060895233154296874, 0.062491649627685546, 0.060980224609375, 0.06099558258056641, 0.06110310363769531, 0.06090956878662109, 0.060767230987548826, 0.06076211166381836, 0.06077542495727539, 0.06094847869873047, 0.06072115325927734, 0.06075392150878906, 0.060911617279052734, 0.06179123306274414, 0.06146355056762695, 0.06151174545288086, 0.05984864044189453, 0.0592097282409668, 0.059241470336914064, 0.0594442253112793, 0.05915238571166992, 0.05912063980102539, 0.059140094757080076, 0.05916159820556641, 0.05927219009399414, 0.059033599853515625, 0.059902976989746094, 0.06051942443847656, 0.060739585876464844, 0.06076313781738281, 0.060609535217285154, 0.059551742553710936, 0.05947699356079102, 0.06035257720947266, 0.060338111877441404, 0.06081126403808594, 0.06072524642944336, 0.060757022857666015, 0.06074979019165039, 0.060472320556640625, 0.06065459060668945, 0.06086656188964844, 0.06003609466552735, 0.12312576293945313, 0.06074060821533203, 0.06058700942993164, 0.060955646514892575, 0.060897281646728516, 0.060668926239013675, 0.05904793548583984, 0.059138046264648435, 0.0591984977722168, 0.059074527740478514, 0.05915545654296875, 0.05922918319702149, 0.05921279907226563, 
0.05931417465209961, 0.05925068664550781, 0.05924665451049805, 0.05927315139770508, 0.062034942626953124, 0.06089625549316406, 0.05932646560668945, 0.05910630416870117, 0.05935615921020508, 0.059224063873291016, 0.0590909423828125, 0.05909299087524414, 0.05916159820556641, 0.05918105697631836, 0.05901004791259765, 0.05914828872680664, 0.05935718536376953, 0.059216896057128904, 0.05914214324951172, 0.05914214324951172, 0.05903564834594727, 0.05917388916015625, 0.059701248168945314, 0.05926399993896484, 0.05921177673339844, 0.059167774200439456, 0.05920764923095703, 0.05906227111816406, 0.05908992004394531, 0.059063297271728515, 0.05934694290161133, 0.05926707077026367, 0.05919027328491211, 0.05903462219238281, 0.0592988166809082, 0.05978316879272461, 0.06010367965698242, 0.05926502227783203, 0.059202560424804686, 0.05930092620849609, 0.05915334320068359, 0.059085823059082034, 0.059358207702636716, 0.05939199829101562, 0.05905929565429688, 0.05903247833251953, 0.05921382522583008, 0.059084800720214846, 0.05944319915771484, 0.059240447998046876, 0.11974553680419922, 0.059086910247802736, 0.05899875259399414, 0.05906224060058594, 0.059066368103027345, 0.059218944549560545, 0.05904281616210937, 0.05910220718383789, 0.05922304153442383, 0.059017215728759766, 0.05900185775756836, 0.05915750503540039, 0.059101184844970706, 0.05916262435913086, 0.05922099304199219, 0.059033599853515625, 0.05897625732421875, 0.05926604843139648, 0.059210750579833986, 0.059121662139892575, 0.05920358276367187, 0.05924556732177735, 0.059066368103027345, 0.05899776077270508, 0.05931008148193359, 0.06076620864868164, 0.059312126159667966, 0.059284481048583984, 0.059046913146972656, 0.05910425567626953, 0.059017215728759766, 0.05894041442871094, 0.05906227111816406, 0.05915955352783203, 0.059063297271728515, 0.05913600158691406, 0.05902233505249024, 0.059099136352539064, 0.059030529022216796, 0.05913497543334961, 0.0589035530090332, 0.05907353591918945, 0.059066368103027345, 0.058962944030761716, 0.059486209869384764, 0.059453441619873044, 0.058998783111572264, 0.058987518310546876, 0.05903564834594727, 0.05900185775756836, 0.05933977508544922, 0.05908070373535156, 0.059009025573730466, 0.06041702270507812, 0.05994598388671875, 0.058993663787841794, 0.05916262435913086, 0.059085823059082034, 0.05953638458251953, 0.05912473678588867, 0.05950054550170898, 0.060255264282226564, 0.05989884948730469, 0.11980083465576172, 0.059218944549560545, 0.05915238571166992, 0.059079681396484375, 0.05905715179443359, 0.059115520477294924, 0.05936025619506836, 0.0590643196105957, 0.05912575912475586, 0.05918207931518555, 0.06097919845581055, 0.059486209869384764, 0.05931622314453125, 0.059044864654541014, 0.05903363037109375, 0.059218910217285155, 0.059025409698486325, 0.05917491149902344, 0.05916364669799805, 0.05909401702880859, 0.0590489616394043, 0.05904076766967774, 0.059096065521240235, 0.059121662139892575, 0.05902438354492188, 0.05902131271362305, 0.05898854446411133, 0.05910015869140625, 0.059300865173339844, 0.05925785446166992, 0.06097612762451172, 0.060662784576416016, 0.060878849029541014, 0.06074060821533203, 0.06073446273803711, 0.06080412673950195, 0.06049481582641601, 0.06058700942993164, 0.060581886291503906, 0.0608092155456543, 0.059243518829345705, 0.059063297271728515, 0.05907251358032226, 0.05892403030395508, 0.05899776077270508, 0.05919744110107422, 0.05904793548583984, 0.059481086730957033, 0.05923942565917969, 0.05914419174194336, 0.05912985610961914, 0.059145217895507814, 0.05928550338745117, 
0.059261951446533206, 0.05939199829101562, 0.05910323333740235, 0.05878988647460937, 0.059832321166992185, 0.061055999755859375, 0.060690433502197265, 0.06087174224853516, 0.06086547088623047, 0.060862464904785155, 0.12235059356689452, 0.05892607879638672, 0.05911859130859375, 0.05902950286865234, 0.05892403030395508, 0.058929153442382816, 0.05973811340332031, 0.059853824615478515, 0.058992641448974606, 0.059038719177246096, 0.05915545654296875, 0.05913190460205078, 0.058912769317626956, 0.05937152099609375, 0.060902400970458986, 0.06070272064208984, 0.06099353790283203, 0.060695552825927736, 0.059014144897460936, 0.05908172988891602, 0.05928140640258789, 0.059154430389404294, 0.06019583892822265, 0.06073241424560547, 0.0606668815612793, 0.060614654541015625, 0.06086252975463867, 0.06079072189331055, 0.06075801467895508, 0.06062899017333984, 0.06069247817993164, 0.060826625823974606, 0.061156352996826174, 0.06073241424560547, 0.06075392150878906, 0.060655616760253904, 0.06082252883911133, 0.06071705627441406, 0.060818431854248046, 0.060819454193115234, 0.06071091079711914, 0.06072012710571289, 0.0607723503112793, 0.061059070587158204, 0.06127308654785156, 0.060447742462158206, 0.05913907241821289, 0.05907251358032226, 0.06083174514770508, 0.06084505462646484, 0.06077644729614258, 0.060709888458251954, 0.0603054084777832, 0.059133953094482425, 0.05898342514038086, 0.06038323211669922, 0.06078668975830078, 0.059170848846435545, 0.060978145599365235, 0.06088601684570313, 0.06031568145751953, 0.05907247924804687, 0.05908889770507812]",tokens/s,16.3764561555309,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-7b,huggyllama/llama-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1938.939904,5480.382464,0.0,4833.93536,4503.41376,s,10,5.748573669433593,0.5748573669433594,0.0017742246285147622,0.5745578002929688,0.5774038818359375,0.5778909057617188,0.5782805249023437,"[0.5783779296875, 0.577295654296875, 0.57393994140625, 0.573126220703125, 0.5733052978515625, 0.5724807739257812, 0.5741940307617187, 0.5749215698242187, 0.5756366577148437, 0.5752955932617188]",tokens/s,445.32785821499897,kWh,6.7656206220020495e-06,3.7063076173277895e-06,3.206590528233145e-05,4.2537833521661295e-05,tokens/kWh,6018172.0319545325,MB,1940.045824,5480.382464,0.0,4833.93536,4688.700416,s,10,334.972359375,33.49723593750001,0.0033039843001841955,33.498505859375,33.500188281250004,33.500191796875,33.500194609375,"[33.5001953125, 33.49962890625, 33.4886484375, 33.4951328125, 33.496859375, 33.49850390625, 33.49586328125, 33.5001875, 33.49883203125, 33.4985078125]",tokens/s,1.8807521945257513,kWh,0.0003954924626188515,0.00021676498843461862,0.0018363540061190774,0.0024486114571725475,tokens/kWh,25728.867605948046,,s,629,339.6100241088865,0.5399205470729518,0.06828609533356728,0.531684326171875,0.532189599609375,0.5323219116210938,1.1051400146484376,"[0.5314457397460938, 0.5318953247070313, 0.5313341674804688, 0.5320222778320313, 0.53230078125, 0.5322966918945312, 0.532052978515625, 
0.5319219360351563, 0.5317222290039062, 0.5324605712890625, 0.5316915283203125, 0.5319751586914062, 0.5320540161132813, 0.532116455078125, 0.5318584594726562, 0.5316935424804687, 0.5311477661132813, 0.5319874267578125, 0.5314037475585938, 0.5320007934570312, 0.5313106079101563, 0.5315860595703125, 0.5312788696289062, 0.5318707275390625, 0.5317847290039063, 0.5322864379882812, 0.5312890625, 0.5320038452148438, 0.5311918334960938, 0.5317908325195313, 0.53142529296875, 0.5319515991210938, 0.5317119750976562, 0.5320806274414063, 0.5313812255859375, 0.53172119140625, 0.5311549682617187, 0.532105224609375, 0.5318379516601562, 0.5316167602539063, 0.5311580200195313, 0.5316771850585937, 0.5315532836914062, 0.532158447265625, 0.531968017578125, 0.5320550537109375, 0.5314191284179688, 0.5322158203125, 0.5313853149414063, 0.5317304077148437, 0.5311661987304688, 0.5319700317382813, 0.5315399780273438, 0.5316935424804687, 0.531431396484375, 0.5315706787109375, 0.53121435546875, 0.5316372680664062, 0.5313402709960937, 0.531820556640625, 0.5316812744140625, 0.532168701171875, 1.109749755859375, 0.531346435546875, 0.5321318359375, 0.5310955810546875, 0.5315061645507813, 0.531493896484375, 0.5316331787109375, 0.5316638793945313, 0.5315604248046875, 0.531146728515625, 0.5316085815429688, 0.5309306640625, 0.5317273559570312, 0.5312214965820312, 0.5316608276367187, 0.5313013916015625, 0.5319352416992188, 0.5315829467773437, 0.5317447509765625, 0.5317273559570312, 0.53187890625, 0.5315963134765626, 0.53159423828125, 0.5313740844726562, 0.532463623046875, 0.5318870849609375, 0.5320325317382812, 0.5313720092773437, 0.531726318359375, 0.5309389038085938, 0.5318615112304688, 0.5310996704101563, 0.5315245971679687, 0.5312379150390625, 0.5315072021484375, 0.5316474609375, 0.5321410522460938, 0.5322495727539063, 0.5316792602539062, 0.5317243041992188, 0.5318717651367187, 0.531431396484375, 0.5317796020507812, 0.5316239624023438, 0.5317867431640625, 0.5317509155273438, 0.533918701171875, 0.5316608276367187, 0.5317805786132812, 0.5316536254882812, 0.5319649047851562, 0.531599365234375, 0.531768310546875, 0.5318400268554687, 0.5323099975585938, 0.5319802856445313, 0.5322926025390625, 0.5316300659179688, 0.532094970703125, 0.5318133544921875, 0.531968017578125, 0.5319454956054688, 0.5321953125, 1.106044921875, 0.5311734008789063, 0.5317723999023437, 0.5311109008789062, 0.5316505737304688, 0.5316710205078125, 0.5314662475585937, 0.531103759765625, 0.5316044921875, 0.531599365234375, 0.5319229736328125, 0.5315625, 0.5319280395507813, 0.5312767944335938, 0.5315369262695312, 0.5314559936523438, 0.531651611328125, 0.5313187866210938, 0.5317222290039062, 0.5312973022460937, 0.5317273559570312, 0.5311549682617187, 0.531431396484375, 0.531472412109375, 0.532068359375, 0.5310628051757813, 0.5315819702148438, 0.5311876831054687, 0.5319567260742187, 0.5312583618164063, 0.5321011352539062, 0.5313074951171874, 0.5321062622070313, 0.531146728515625, 0.5314539794921875, 0.5311528930664062, 0.5316218872070313, 0.531188720703125, 0.531778564453125, 0.53129931640625, 0.53172021484375, 0.53125634765625, 0.5316557006835938, 0.5312962646484375, 0.5317734375, 0.531356689453125, 0.5318450927734375, 0.5314447631835938, 0.5319024658203125, 0.531715087890625, 0.5318553466796875, 0.5314600830078124, 0.5316638793945313, 0.5312317504882812, 0.5315184936523437, 0.5314949340820313, 0.5321676635742187, 0.5318441162109375, 0.5320765380859375, 0.5313792114257813, 0.5318953247070313, 0.5314150390625, 0.53174169921875, 1.1051417236328125, 
0.531535888671875, 0.5323489379882812, 0.5317929077148438, 0.53146728515625, 0.5314027709960938, 0.5317069091796875, 0.5312337646484375, 0.5317662963867188, 0.531146728515625, 0.5315819702148438, 0.531715087890625, 0.5320007934570312, 0.531135498046875, 0.5316823120117188, 0.5312440185546875, 0.531652587890625, 0.5312808837890625, 0.53197314453125, 0.53145703125, 0.5322874755859375, 0.5318850708007813, 0.5318379516601562, 0.5312071533203125, 0.531473388671875, 0.531135498046875, 0.531435546875, 0.5312982788085937, 0.5317243041992188, 0.5311580200195313, 0.5318113403320313, 0.5314652099609375, 0.5317734375, 0.53153076171875, 0.5319024658203125, 0.5314232177734375, 0.5322066040039063, 0.5310986328125, 0.5315245971679687, 0.53191064453125, 0.5318922119140626, 0.53157275390625, 0.5320130615234375, 0.5313024291992188, 0.5326827392578125, 0.5315870971679687, 0.5319966430664063, 0.5312655639648437, 0.5317652587890624, 0.5317560424804687, 0.532094970703125, 0.5314744262695312, 0.5314744262695312, 0.5314232177734375, 0.5318430786132813, 0.531373046875, 0.5317376098632812, 0.5314959106445313, 0.53226904296875, 0.5319168090820312, 0.5326305541992188, 0.5315430297851562, 0.5318215942382812, 1.1051356201171876, 0.5312973022460937, 0.5315819702148438, 0.5314774780273438, 0.531794921875, 0.5313423461914063, 0.5317007446289063, 0.5312061157226563, 0.532337646484375, 0.5311897583007813, 0.5318450927734375, 0.5311968994140625, 0.5319700317382813, 0.5320519409179687, 0.5319915771484375, 0.531267578125, 0.5318911743164062, 0.531177490234375, 0.5319485473632812, 0.5313751220703125, 0.5316874389648437, 0.5312163696289063, 0.5318246459960938, 0.5318041381835937, 0.5321021728515625, 0.5312501831054688, 0.5314898071289063, 0.5313935546875, 0.5318809814453125, 0.5312296752929687, 0.5316013793945312, 0.5316351928710937, 0.5323223266601562, 0.5317406616210938, 0.5318369140625, 0.5311907958984375, 0.5315348510742187, 0.5314457397460938, 0.531673095703125, 0.53165771484375, 0.5315717163085938, 0.531652587890625, 0.5317069091796875, 0.5313167114257813, 0.53161474609375, 0.5311897583007813, 0.5315440673828125, 0.5312553100585937, 0.5322280883789062, 0.5312880859375, 0.5325035400390625, 0.5322506103515625, 0.5319270629882813, 0.5315594482421875, 0.53212158203125, 0.5314180908203125, 0.5320048828125, 0.5329141845703125, 0.5321277465820312, 0.5319035034179688, 0.5321103515625, 0.5317222290039062, 0.5319536743164063, 1.104712646484375, 0.531314697265625, 0.531794921875, 0.531409912109375, 0.5321390380859375, 0.5317406616210938, 0.5324042358398438, 0.5315665893554687, 0.531862548828125, 0.5314232177734375, 0.5315747680664062, 0.5311682739257813, 0.5318154296875, 0.53142529296875, 0.5318092651367188, 0.5314898071289063, 0.5318870849609375, 0.531378173828125, 0.53172119140625, 0.5311692504882812, 0.531430419921875, 0.5313966064453125, 0.531857421875, 0.5313710327148438, 0.5323397216796875, 0.5316075439453125, 0.5323120727539062, 0.5317294311523437, 0.5321543579101562, 0.5312010498046875, 0.5318461303710937, 0.5319188232421875, 0.532210693359375, 0.5314221801757812, 0.5321881713867187, 0.5314324340820312, 0.5315921630859375, 0.5312501831054688, 0.5330370483398438, 0.531240966796875, 0.5315143432617188, 0.5314150390625, 0.5320355834960937, 0.531736572265625, 0.532005859375, 0.5315584106445312, 0.5322998046875, 0.5318276977539063, 0.5317406616210938, 0.53136279296875, 0.5315768432617187, 0.531726318359375, 0.5317335205078125, 0.5318584594726562, 0.5316566772460938, 0.531314697265625, 0.5316884765625, 0.5317488403320313, 
0.5318338623046875, 0.5312901000976562, 0.53187890625, 0.5315584106445312, 0.5323212890625, 1.1061114501953124, 0.5317314453125, 0.5319290771484375, 0.531072998046875, 0.5316290283203124, 0.5314488525390625, 0.5314508666992187, 0.5313126220703125, 0.5319659423828125, 0.5314447631835938, 0.5315645141601563, 0.5322833862304688, 0.5317775268554688, 0.5313228759765625, 0.5322045288085937, 0.53157373046875, 0.5320222778320313, 0.53178369140625, 0.5322998046875, 0.5316710205078125, 0.5317069091796875, 0.531219482421875, 0.5317181396484375, 0.5315245971679687, 0.531736572265625, 0.5315369262695312, 0.53231103515625, 0.5319618530273438, 0.5321339111328125, 0.5312696533203125, 0.5317406616210938, 0.53157275390625, 0.5317908325195313, 0.5312593994140625, 0.5319639282226563, 0.5313003540039063, 0.5317816162109374, 0.5313792114257813, 0.531936279296875, 0.5312071533203125, 0.5314293823242188, 0.5310996704101563, 0.531589111328125, 0.5313955688476563, 0.53193115234375, 0.5313556518554687, 0.5319588012695312, 0.531583984375, 0.532021240234375, 0.5315205078125, 0.5315798950195313, 0.5314150390625, 0.5316608276367187, 0.53139453125, 0.5319014282226563, 0.5313863525390625, 0.5320611572265626, 0.5318748168945312, 0.5321339111328125, 0.5318461303710937, 0.5317355346679687, 0.5316792602539062, 0.5318819580078125, 1.1075174560546874, 0.531072021484375, 0.5316382446289063, 0.5312890625, 0.53142529296875, 0.531535888671875, 0.5324411010742187, 0.5314232177734375, 0.5321287841796875, 0.5315972900390625, 0.5322034912109375, 0.5314857177734374, 0.5315389404296875, 0.531357666015625, 0.5317713623046875, 0.5311488037109375, 0.5317294311523437, 0.5311682739257813, 0.531652587890625, 0.5319782104492188, 0.5317386474609375, 0.5313341674804688, 0.5316669311523438, 0.5313074951171874, 0.5316454467773437, 0.5311918334960938, 0.5317662963867188, 0.5315706787109375, 0.5321380004882813, 0.5316188354492187, 0.5322280883789062, 0.5312737426757812, 0.5317304077148437, 0.5311057739257813, 0.5317775268554688, 0.5316751098632813, 0.5317723999023437, 0.5313269653320313, 0.532220947265625, 0.531409912109375, 0.531726318359375, 0.5313054809570312, 0.5317427368164063, 0.5314866943359375, 0.5319229736328125, 0.5314406127929687, 0.532294677734375, 0.5323673706054688, 0.5322874755859375, 0.5322711181640625, 0.5320068969726562, 0.5316044921875, 0.5322977294921875, 0.531583984375, 0.5319116821289063, 0.5315798950195313, 0.532516845703125, 0.5323622436523437, 0.5323622436523437, 0.5315491943359375, 0.5319935913085938, 0.5320017700195312, 0.531962890625, 1.1080765380859374, 0.5319547119140625, 0.5317703857421875, 0.53150927734375, 0.5314652099609375, 0.5311805419921874, 0.53163623046875, 0.5313218383789062, 0.531493896484375, 0.531631103515625, 0.5318482055664062, 0.5316925659179688, 0.5316792602539062, 0.5311713256835937, 0.53180517578125, 0.5314037475585938, 0.531620849609375, 0.5318748168945312, 0.5327401123046875, 0.5320171508789062, 0.5328353271484375, 0.5319864501953125, 0.5319833374023437, 0.5312788696289062, 0.53235302734375, 0.5315451049804687, 0.5321615600585937, 0.5315635375976563, 0.5320089721679687, 0.5313218383789062, 0.5321666259765625, 0.5312501831054688, 0.5315911865234375, 0.5312686157226563, 0.5321390380859375, 0.531409912109375, 0.531768310546875, 0.5311641845703124, 0.5316935424804687, 0.5315829467773437, 0.5322066040039063, 0.5317447509765625, 0.5320120239257813, 0.5312399291992187, 0.5316751098632813, 0.5313863525390625, 0.5317621459960937, 0.531684326171875, 0.5316658935546875, 0.5314549560546875, 0.5319035034179688, 
0.531262451171875, 0.5315963134765626, 0.5311375122070312, 0.5317283935546875, 0.5315194702148438, 0.5318932495117188, 0.532084716796875, 0.5324154663085937, 0.5319188232421875, 0.5318717651367187, 0.5313474731445312, 0.5320181884765625, 1.10763720703125, 0.5311539306640625, 0.5317109985351562, 0.5316484985351563, 0.5316484985351563, 0.5312767944335938, 0.5317509155273438, 0.5310279541015624, 0.5314611206054688, 0.5312849731445313, 0.53146728515625, 0.5310648193359375, 0.5322076416015625, 0.53161572265625, 0.5324257202148438, 0.5314857177734374, 0.5316536254882812, 0.5312450561523437, 0.5317069091796875, 0.5315901489257813, 0.5321072387695313, 0.5315020751953125, 0.531820556640625, 0.5321041870117188, 0.5318809814453125, 0.5312317504882812, 0.532041748046875, 0.5315451049804687, 0.5318154296875, 0.5316792602539062, 0.5321062622070313, 0.5322495727539063, 0.5319905395507812, 0.531694580078125, 0.5317621459960937, 0.5315451049804687, 0.5317488403320313, 0.5312747802734376, 0.5321328735351563, 0.531746826171875, 0.5318154296875, 0.5317119750976562, 0.531684326171875, 0.5313290405273438, 0.5314979858398438, 0.5311682739257813, 0.5318164672851563, 0.5316884765625, 0.53195263671875, 0.5318246459960938, 0.532116455078125, 0.5315665893554687, 0.5320376586914063, 0.5314426879882812, 0.5319772338867188, 0.5315123291015625, 0.53215234375, 0.53184716796875, 0.5321748657226563, 0.5318123779296875, 0.5321236572265625, 0.5316618041992187, 0.5318829956054687]",tokens/s,1.8521243642629592,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-125m,,cuda,0,42,,,,,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,main,False,False,MB,1310.69952,921.174016,0.0,274.726912,220.646912,s,10,0.36048070526123044,0.03604807052612304,0.0010977874344766398,0.03588955307006836,0.036477619552612306,0.03779876155853271,0.03885567516326904,"[0.03911990356445313, 0.03553478240966797, 0.03469347381591797, 0.035817951202392576, 0.036184032440185546, 0.035873409271240234, 0.03543529510498047, 0.03591993713378906, 0.03590569686889648, 0.03599622344970703]",tokens/s,7101.628360787961,kWh,4.1762897090252873e-07,2.2884077887720709e-07,9.141624902616015e-07,1.5606322400413373e-06,tokens/kWh,164036083.2179266,MB,1311.0272,921.174016,0.0,274.726912,250.723328,s,10,21.866080322265628,2.186608032226563,0.01854961703991556,2.191078857421875,2.2041080810546876,2.2073608032226564,2.2099629809570316,"[2.1803974609375, 2.17111474609375, 2.154228759765625, 2.200171142578125, 2.210613525390625, 2.181986572265625, 2.161680419921875, 2.20071923828125, 2.20338525390625, 2.201783203125]",tokens/s,28.81174818325753,kWh,2.578141039891902e-05,1.4128932798644376e-05,5.28597971955373e-05,9.27701403931007e-05,tokens/kWh,679097.8188999841,,s,629,22.151173107147223,0.03521649142630718,0.004356343174673542,0.03486105728149414,0.035241778564453125,0.03557929000854492,0.07092830078125002,"[0.03474124908447265, 0.03529420852661133, 0.03632339096069336, 0.03650860977172852, 0.036083713531494144, 0.037152767181396484, 
0.03645132827758789, 0.036311038970947264, 0.03472588729858399, 0.0354252815246582, 0.03531980895996094, 0.03479657745361328, 0.03465929412841797, 0.03616358566284179, 0.035585025787353515, 0.03515903854370117, 0.03486105728149414, 0.034651134490966795, 0.034900993347167966, 0.03449958419799805, 0.03328102493286133, 0.03349708938598633, 0.033560577392578124, 0.03324620819091797, 0.033328128814697267, 0.033255424499511715, 0.033754112243652344, 0.033223743438720706, 0.03327481460571289, 0.033438720703125, 0.0335175666809082, 0.03376332855224609, 0.03339468765258789, 0.03345305633544922, 0.03355033493041992, 0.034203647613525394, 0.03342233657836914, 0.03335372924804687, 0.0336732177734375, 0.03367116928100586, 0.03360870361328125, 0.03362508773803711, 0.03436441421508789, 0.03604377746582031, 0.03504127883911133, 0.03473920059204102, 0.0348037109375, 0.03472895812988281, 0.03472895812988281, 0.03479865646362305, 0.03479955291748047, 0.03479449462890625, 0.03474739074707031, 0.03478220748901367, 0.0347064323425293, 0.034907135009765625, 0.03474431991577148, 0.03473715209960938, 0.03485388946533203, 0.034683902740478514, 0.035019775390625, 0.0350750732421875, 0.0711178207397461, 0.0347955207824707, 0.034948097229003904, 0.03439616012573242, 0.03447808074951172, 0.03496755218505859, 0.034920448303222655, 0.0348590087890625, 0.034699264526367186, 0.0346163215637207, 0.03491430282592774, 0.03490304183959961, 0.03508019256591797, 0.034423809051513675, 0.03472793579101562, 0.03449446487426758, 0.03476377487182617, 0.03476582336425781, 0.03468902587890625, 0.03387494277954101, 0.0342210578918457, 0.034008094787597656, 0.03374998474121094, 0.03369062423706055, 0.033459201812744144, 0.03354937744140625, 0.03469715118408203, 0.03489484786987305, 0.03487027359008789, 0.03510067367553711, 0.03503513717651367, 0.03503923034667969, 0.03469619369506836, 0.03455590438842773, 0.03517852783203125, 0.03441353607177734, 0.03464704132080078, 0.03492659378051758, 0.034506752014160154, 0.03476070404052734, 0.03487846374511719, 0.03493478393554687, 0.03499212646484375, 0.034945056915283206, 0.035192798614501954, 0.03450265502929688, 0.03448524856567383, 0.03455487823486328, 0.034563072204589845, 0.03503104019165039, 0.03474431991577148, 0.03377872085571289, 0.03339875030517578, 0.03340800094604492, 0.03347558212280274, 0.03381350326538086, 0.033650688171386715, 0.033535999298095705, 0.033334270477294925, 0.03363123321533203, 0.03390771102905273, 0.03361996841430664, 0.03386880111694336, 0.06886093139648437, 0.03341823959350586, 0.03395993423461914, 0.03393228912353516, 0.03374387359619141, 0.03333529663085937, 0.0334919662475586, 0.03423027038574219, 0.03382476806640625, 0.03374387359619141, 0.0336824951171875, 0.033724353790283206, 0.03363532638549805, 0.033941505432128906, 0.03381760025024414, 0.033503231048583985, 0.03344998550415039, 0.033301502227783206, 0.033445888519287106, 0.033957889556884766, 0.03387801742553711, 0.03366092681884766, 0.03360153579711914, 0.03381043243408203, 0.033825790405273434, 0.03360870361328125, 0.03370703887939453, 0.03348271942138672, 0.03366502380371094, 0.03378995132446289, 0.03319193649291992, 0.03369267272949219, 0.03357900619506836, 0.033358848571777344, 0.0341104621887207, 0.03495731353759766, 0.03543961715698242, 0.03482316970825195, 0.034435073852539064, 0.034557952880859374, 0.034900993347167966, 0.034678783416748044, 0.03517542266845703, 0.03521023941040039, 0.035046398162841795, 0.03481702423095703, 0.03453235244750977, 0.03470131301879883, 0.03486412811279297, 
0.03474739074707031, 0.03477196884155274, 0.03457843017578125, 0.034781185150146485, 0.03486310577392578, 0.035125247955322264, 0.03486617660522461, 0.03496243286132812, 0.03503513717651367, 0.03523891067504883, 0.03469823837280273, 0.0339128303527832, 0.03414220809936523, 0.035117088317871095, 0.0711659164428711, 0.03465318298339844, 0.03465727996826172, 0.03458355331420898, 0.034283519744873044, 0.034304000854492187, 0.03577958297729492, 0.03595161437988281, 0.03544473648071289, 0.03500543975830078, 0.03521843338012695, 0.03495935821533203, 0.0349194221496582, 0.0354252815246582, 0.03515903854370117, 0.0349306869506836, 0.03492454528808594, 0.036211711883544925, 0.03564441680908203, 0.03498086547851562, 0.034841598510742186, 0.034941951751708986, 0.03537612915039062, 0.034994174957275394, 0.0350382080078125, 0.03486105728149414, 0.03484672164916992, 0.03494297790527344, 0.03524095916748047, 0.03526041412353516, 0.03520614242553711, 0.03488153457641602, 0.03502796936035156, 0.03500543975830078, 0.03404185485839844, 0.033903617858886716, 0.03378073501586914, 0.03472281646728516, 0.03411251068115234, 0.03379097747802735, 0.03348787307739258, 0.0343633918762207, 0.03492659378051758, 0.03500339126586914, 0.03493788909912109, 0.03485590362548828, 0.034895870208740236, 0.034948097229003904, 0.03518771362304687, 0.034479103088378905, 0.03459174346923828, 0.035253280639648436, 0.034963424682617185, 0.034976768493652347, 0.03491843032836914, 0.034911201477050784, 0.035046398162841795, 0.03507199859619141, 0.03536383819580078, 0.03499929428100586, 0.03523379135131836, 0.03531161499023437, 0.03527679824829102, 0.07160934448242187, 0.0352624626159668, 0.03535769653320313, 0.03481087875366211, 0.03554921722412109, 0.034968544006347656, 0.03489491271972656, 0.034890689849853516, 0.03483238220214844, 0.034993152618408206, 0.035932159423828124, 0.03508838272094727, 0.03496755218505859, 0.03476377487182617, 0.03488358306884766, 0.03527884674072266, 0.03521228790283203, 0.035040256500244144, 0.03476684951782227, 0.03499622344970703, 0.03482316970825195, 0.03518259048461914, 0.03503411102294922, 0.036178943634033206, 0.039564289093017575, 0.035097599029541016, 0.03457843017578125, 0.03505561447143555, 0.034756607055664065, 0.0350013427734375, 0.03477811050415039, 0.035004417419433595, 0.03501772689819336, 0.0348671989440918, 0.03473100662231445, 0.0349224967956543, 0.03482931137084961, 0.03578777694702148, 0.0350115852355957, 0.03483443069458008, 0.03486412811279297, 0.03486207962036133, 0.034938880920410156, 0.03505561447143555, 0.035004417419433595, 0.034887680053710936, 0.034531326293945314, 0.03493580627441406, 0.03510067367553711, 0.03492454528808594, 0.034887680053710936, 0.03578879928588867, 0.034974720001220705, 0.03519180679321289, 0.034850879669189455, 0.03495315170288086, 0.035060768127441404, 0.034809825897216796, 0.03377766418457031, 0.03550620651245117, 0.034968544006347656, 0.034915328979492184, 0.03489177703857422, 0.07134719848632813, 0.03493273544311523, 0.03501465606689453, 0.03539148712158203, 0.03512319946289062, 0.034813953399658204, 0.03508019256591797, 0.03486515045166016, 0.035079166412353514, 0.03493580627441406, 0.034939903259277344, 0.034925567626953126, 0.03743027114868164, 0.034576385498046876, 0.03397836685180664, 0.03377356719970703, 0.03403878402709961, 0.03371724700927734, 0.03380223846435547, 0.03486310577392578, 0.03486412811279297, 0.03496243286132812, 0.03475046539306641, 0.03492659378051758, 0.03499622344970703, 0.034783233642578126, 0.033949695587158206, 
0.033876991271972655, 0.03427123260498047, 0.03477811050415039, 0.03472076797485352, 0.034127872467041014, 0.03329740905761719, 0.0333834228515625, 0.03379814529418945, 0.03392409515380859, 0.03363532638549805, 0.03386982345581055, 0.03498086547851562, 0.034928638458251955, 0.03496345520019531, 0.03495423889160156, 0.03523891067504883, 0.03508838272094727, 0.034769920349121096, 0.03489279937744141, 0.03482624053955078, 0.034955265045166016, 0.03508838272094727, 0.03486412811279297, 0.03486515045166016, 0.03471769714355469, 0.034560001373291016, 0.034579456329345705, 0.03481292724609375, 0.03479244613647461, 0.034802688598632815, 0.03475254440307617, 0.03517948913574219, 0.03433884811401367, 0.033702880859375, 0.03359539031982422, 0.034111488342285154, 0.07044096374511719, 0.03437670516967774, 0.03483238220214844, 0.0348671989440918, 0.03470438385009766, 0.035230720520019534, 0.034816001892089846, 0.03467366409301758, 0.0347770881652832, 0.03501055908203125, 0.03429580688476563, 0.03443609619140625, 0.03509964752197266, 0.03524505615234375, 0.0347248649597168, 0.034753536224365236, 0.03496345520019531, 0.03496448135375976, 0.034770942687988284, 0.034799617767333986, 0.03484467315673828, 0.03481190490722656, 0.034353153228759765, 0.034062335968017575, 0.03349094390869141, 0.033584129333496096, 0.033587200164794925, 0.034141185760498044, 0.0347740478515625, 0.0345906867980957, 0.0344268798828125, 0.03489484786987305, 0.03420979309082031, 0.03364352035522461, 0.03377356719970703, 0.033686527252197264, 0.033290241241455076, 0.0337520637512207, 0.03401932907104492, 0.03371417617797851, 0.03375513458251953, 0.033691646575927735, 0.03370809555053711, 0.03350316619873047, 0.03325439834594727, 0.03360255813598633, 0.033860607147216795, 0.03376230239868164, 0.033691646575927735, 0.033783809661865234, 0.033797119140625, 0.03425177764892578, 0.03381452941894531, 0.033791999816894534, 0.033675262451171875, 0.03391385650634766, 0.034165760040283204, 0.03502899169921875, 0.03486822509765625, 0.034702335357666016, 0.035046398162841795, 0.03521535873413086, 0.034825214385986326, 0.07150080108642579, 0.03484364700317383, 0.034351104736328124, 0.03453747177124023, 0.035064830780029296, 0.03482931137084961, 0.03483238220214844, 0.03492659378051758, 0.034974720001220705, 0.03501465606689453, 0.034869247436523435, 0.034830337524414064, 0.034277374267578126, 0.034797569274902344, 0.03501363372802734, 0.03530342483520508, 0.0351016960144043, 0.03491635131835937, 0.03483238220214844, 0.03484672164916992, 0.03495116806030273, 0.03500646209716797, 0.03499827194213867, 0.03480678558349609, 0.03486207962036133, 0.03505152130126953, 0.034784255981445314, 0.034783233642578126, 0.034835456848144535, 0.03493580627441406, 0.034770942687988284, 0.035037185668945314, 0.035019775390625, 0.0342476806640625, 0.03505049514770508, 0.034315265655517575, 0.03477913665771484, 0.034791454315185544, 0.0353023681640625, 0.03562700653076172, 0.03510374450683594, 0.03493580627441406, 0.035156993865966796, 0.03526144027709961, 0.03488256072998047, 0.034933761596679686, 0.03517337417602539, 0.0348590087890625, 0.0352911376953125, 0.035888126373291016, 0.03522048187255859, 0.03506073760986328, 0.03483955383300781, 0.03486105728149414, 0.03498400115966797, 0.03498387145996094, 0.03485494232177734, 0.03438998413085938, 0.03479244613647461, 0.03507712173461914, 0.03500339126586914, 0.03471257781982422, 0.03476684951782227, 0.07122022247314454, 0.03482316970825195, 0.03523276901245117, 0.034941951751708986, 0.03477196884155274, 
0.034991104125976565, 0.03495731353759766, 0.034939903259277344, 0.03488153457641602, 0.034900993347167966, 0.0347955207824707, 0.0350300178527832, 0.03487948989868164, 0.03520614242553711, 0.035297279357910154, 0.03513651275634765, 0.03494297790527344, 0.0349378547668457, 0.034948097229003904, 0.03488051223754883, 0.03481497573852539, 0.03454873657226563, 0.03495935821533203, 0.035062782287597655, 0.035043327331542966, 0.0349306869506836, 0.03497881698608398, 0.034909183502197266, 0.03493791961669922, 0.03483539199829101, 0.034869247436523435, 0.035031105041503904, 0.03493983840942383, 0.03483340835571289, 0.03487744140625, 0.03489484786987305, 0.03516108703613281, 0.03484364700317383, 0.034969600677490234, 0.035146751403808595, 0.035194881439208986, 0.03479244613647461, 0.03557068634033203, 0.03522969436645508, 0.035337215423583986, 0.03480473709106445, 0.03436236953735351, 0.033745918273925785, 0.03496755218505859, 0.03541401672363281, 0.03512934494018555, 0.035163135528564454, 0.0348487663269043, 0.03500339126586914, 0.03484467315673828, 0.03498495864868164, 0.03576115036010742, 0.03517030334472656, 0.03490816116333008, 0.034825214385986326, 0.0347586555480957, 0.03516723251342774, 0.035053569793701174, 0.07151315307617187, 0.03519071960449219, 0.03483852767944336, 0.03482419204711914, 0.03502592086791992, 0.03529216003417969, 0.035125247955322264, 0.034872318267822264, 0.034939903259277344, 0.0347658576965332, 0.03481699371337891, 0.03497267150878906, 0.03488665771484375, 0.034958335876464845, 0.035007488250732424, 0.03504537582397461, 0.03489382553100586, 0.03520716857910156, 0.034988033294677735, 0.034874366760253905, 0.03459481430053711, 0.03430297470092773, 0.0346879997253418, 0.03478732681274414, 0.03508736038208008, 0.0354856948852539, 0.034981952667236325, 0.0348732795715332, 0.03489382553100586, 0.03475558471679688, 0.03478732681274414, 0.03488358306884766, 0.03473920059204102, 0.03476172637939453, 0.034685951232910156, 0.035111934661865234, 0.03508736038208008, 0.03526553726196289, 0.03492147064208984, 0.03475763320922851, 0.034767871856689454, 0.03482316970825195, 0.03498092651367188, 0.03539142227172851, 0.03607961654663086, 0.03499008178710938, 0.03491123199462891, 0.034990142822265625, 0.03488249588012695, 0.03484467315673828, 0.03486207962036133, 0.0349378547668457, 0.034953216552734374, 0.03488051223754883, 0.03490816116333008, 0.0349378547668457, 0.03494604873657227, 0.03508838272094727, 0.03482726287841797, 0.03486003112792969, 0.03481702423095703, 0.034729984283447264, 0.0349224967956543]",tokens/s,28.395787300179112,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,microsoft/phi-1_5,microsoft/phi-1_5,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1358.589952,2103.967744,0.0,1457.52064,1272.881664,s,10,1.3631298065185549,0.1363129806518555,0.0008682167068706195,0.13635552215576174,0.1370564453125,0.13756546936035158,0.13797268859863282,"[0.13807449340820313, 0.13481919860839844, 0.13663923645019532, 0.13648768615722656, 0.1363610534667969, 
0.13634999084472657, 0.13634962463378905, 0.1369433288574219, 0.13605657958984374, 0.13504861450195313]",tokens/s,1878.030975302537,kWh,1.5893511683852586e-06,8.706513991564859e-07,6.58916082688051e-06,9.049163394422252e-06,tokens/kWh,28289908.010479063,MB,1358.589952,2103.967744,0.0,1457.52064,1369.424896,s,10,80.1509423828125,8.01509423828125,0.04758558328935424,8.03790185546875,8.060850390625001,8.06196572265625,8.06285798828125,"[7.943890625, 7.97273681640625, 8.0463369140625, 8.0606025390625, 8.06037841796875, 8.0630810546875, 8.0572548828125, 8.029466796875, 7.960978515625, 7.9562158203125]",tokens/s,7.860169590907975,kWh,9.384463308144499e-05,5.143388635707014e-05,0.00037368961561811727,0.0005189681350566323,tokens/kWh,121394.73648632616,,s,629,81.24969168090827,0.12917280076455995,0.016234788877661135,0.127678466796875,0.12822896728515623,0.12854743347167968,0.26253706176757813,"[0.12623462677001954, 0.12626534271240233, 0.12749107360839843, 0.12655411529541016, 0.12602880096435548, 0.12595507049560548, 0.12608409881591798, 0.125949951171875, 0.12577792358398437, 0.1258967056274414, 0.12588851165771484, 0.12596736145019533, 0.12578099060058595, 0.1260912628173828, 0.12604723358154296, 0.12598169708251952, 0.12593971252441405, 0.12601139068603515, 0.12579634857177735, 0.12603392028808594, 0.12579840087890626, 0.12579532623291015, 0.1258967056274414, 0.1258086395263672, 0.1257297897338867, 0.12582093048095702, 0.1259315185546875, 0.12595609283447265, 0.12572783660888673, 0.12588227081298828, 0.12573081970214844, 0.12737945556640626, 0.1259468765258789, 0.12588646697998046, 0.12581478118896483, 0.12592435455322265, 0.1258792953491211, 0.1258588180541992, 0.12746444702148438, 0.12731187438964844, 0.12609945678710938, 0.12609843444824217, 0.12581478118896483, 0.12597452545166016, 0.12607078552246093, 0.12630118560791015, 0.12647833251953125, 0.12594483184814453, 0.12569087982177735, 0.12633702087402343, 0.12602880096435548, 0.1259898910522461, 0.126023681640625, 0.12634214019775392, 0.12730879974365233, 0.12593357086181642, 0.1258250274658203, 0.12601036834716797, 0.1260175323486328, 0.12602060699462891, 0.1258598403930664, 0.1257185287475586, 0.2625024108886719, 0.12606771087646484, 0.1269903335571289, 0.12595507049560548, 0.12662681579589843, 0.1257164764404297, 0.12573081970214844, 0.12580966186523437, 0.1257676773071289, 0.12584345245361328, 0.12604723358154296, 0.12586700439453125, 0.125876220703125, 0.12575027465820313, 0.12595507049560548, 0.12601856231689454, 0.12643840026855468, 0.12581068420410157, 0.12602265930175782, 0.12638412475585936, 0.12576153564453124, 0.12624486541748048, 0.12597964477539061, 0.12647833251953125, 0.12601856231689454, 0.12584652709960936, 0.12577792358398437, 0.12563763427734376, 0.12597145843505858, 0.1260359649658203, 0.12596121978759767, 0.1258055648803711, 0.12600422668457031, 0.1259345932006836, 0.12581273651123046, 0.12602674865722657, 0.12610355377197266, 0.12577279663085938, 0.12581375885009766, 0.12614963531494142, 0.1260062713623047, 0.12613426971435546, 0.1257359390258789, 0.1258567657470703, 0.12633190155029297, 0.1259694061279297, 0.12657254028320314, 0.12820480346679688, 0.12799180603027344, 0.12775526428222655, 0.127710205078125, 0.127857666015625, 0.1278545913696289, 0.12770918273925783, 0.12889190673828124, 0.1281597442626953, 0.12789145660400392, 0.12811878967285156, 0.12782284545898437, 0.128, 0.12832972717285157, 0.12828466796875, 0.12807577514648438, 0.26479617309570314, 0.12780953979492188, 0.1277501449584961, 0.12807475280761718, 
0.12764979553222655, 0.1277470703125, 0.12904141235351563, 0.12810957336425782, 0.12779315185546875, 0.12776959991455078, 0.12783103942871094, 0.128247802734375, 0.1280174102783203, 0.12772045135498047, 0.12871270751953126, 0.12824371337890625, 0.12798976135253906, 0.12790579223632811, 0.12783103942871094, 0.12777574157714844, 0.1258086395263672, 0.12573798370361328, 0.12613222503662108, 0.12614860534667968, 0.12590592193603517, 0.12583219146728516, 0.12566630554199218, 0.12582093048095702, 0.12807679748535156, 0.1276159973144531, 0.12811672973632812, 0.1279078369140625, 0.1277327346801758, 0.12819967651367187, 0.12767539215087892, 0.1281658935546875, 0.12766515350341798, 0.12815565490722655, 0.12822528076171874, 0.12830618286132814, 0.12780748748779297, 0.1277286376953125, 0.12833279418945312, 0.1282723846435547, 0.12803890991210937, 0.12805836486816408, 0.12844236755371094, 0.12783411407470704, 0.12770611572265625, 0.12779724884033203, 0.1281546173095703, 0.1276159973144531, 0.12784230041503905, 0.12783206176757814, 0.12786688232421875, 0.128142333984375, 0.12768051147460938, 0.12766822052001953, 0.12811264038085937, 0.1277491226196289, 0.12756582641601563, 0.1288970184326172, 0.1281495056152344, 0.26441317749023435, 0.12781977844238282, 0.12796927642822264, 0.12775628662109376, 0.12817613220214844, 0.12858061218261718, 0.12771942138671874, 0.12783513641357422, 0.1279508514404297, 0.12757606506347657, 0.12777369689941406, 0.12777164459228516, 0.12801126098632812, 0.12763545227050782, 0.12789043426513672, 0.1276436462402344, 0.1276559371948242, 0.12776959991455078, 0.1282856903076172, 0.12782080078125, 0.12882330322265625, 0.1280614471435547, 0.12808090209960937, 0.12776959991455078, 0.1277675552368164, 0.12821708679199217, 0.12763033294677734, 0.12757708740234375, 0.1277347869873047, 0.12844032287597656, 0.12804812622070313, 0.12764979553222655, 0.12763545227050782, 0.12835430908203124, 0.12770918273925783, 0.12769075012207032, 0.12814131164550782, 0.1277491226196289, 0.12796934509277344, 0.1278842239379883, 0.12797337341308593, 0.1286594543457031, 0.12793036651611328, 0.12776448059082032, 0.12818226623535156, 0.12790886688232422, 0.12799488067626952, 0.12788121795654298, 0.1278054428100586, 0.12845158386230468, 0.12768870544433594, 0.12787404632568358, 0.12859596252441408, 0.1280563201904297, 0.1278013458251953, 0.12784435272216796, 0.12770508575439454, 0.12821197509765625, 0.12766719818115235, 0.12801228332519532, 0.12829901123046875, 0.1277675552368164, 0.12806965637207032, 0.2643558044433594, 0.12766515350341798, 0.12768051147460938, 0.12765286254882813, 0.12751462554931642, 0.12763545227050782, 0.12779519653320312, 0.12767129516601564, 0.12799078369140626, 0.1276590042114258, 0.1277655029296875, 0.12870144653320312, 0.12818226623535156, 0.12776242828369141, 0.1278699493408203, 0.1276272659301758, 0.128184326171875, 0.12763545227050782, 0.12879769897460938, 0.12881100463867187, 0.12820684814453126, 0.12788735961914063, 0.1276211166381836, 0.12761087799072265, 0.12819456481933594, 0.12811878967285156, 0.12794572448730468, 0.12788735961914063, 0.12781465911865234, 0.12768563079833983, 0.1276211166381836, 0.12834815979003905, 0.12801945495605468, 0.1278720016479492, 0.12835430908203124, 0.1289707489013672, 0.12773990631103516, 0.12778495788574218, 0.12773990631103516, 0.1278167037963867, 0.12815155029296876, 0.12765081787109375, 0.1277675552368164, 0.12948786926269532, 0.12794265747070313, 0.1282826232910156, 0.1276600341796875, 0.12820378112792968, 0.12769792175292968, 
0.1278238754272461, 0.12797235107421875, 0.12779212951660157, 0.12791193389892577, 0.12792217254638671, 0.12759859466552734, 0.12793548583984374, 0.12773580932617187, 0.12768563079833983, 0.12795597076416015, 0.12788633728027343, 0.12800306701660155, 0.1278238754272461, 0.127710205078125, 0.264237060546875, 0.12841676330566407, 0.12879667663574218, 0.12799590301513672, 0.12780032348632814, 0.12786688232421875, 0.1277480926513672, 0.12770201873779297, 0.1276231689453125, 0.12800204467773438, 0.12798976135253906, 0.12791295623779297, 0.12839730834960938, 0.1277163543701172, 0.12764569854736327, 0.12788428497314452, 0.12782284545898437, 0.12786585235595704, 0.12791705322265626, 0.12781362915039063, 0.12769792175292968, 0.12772147369384765, 0.12825599670410157, 0.1278382110595703, 0.12759859466552734, 0.12937522888183595, 0.1279283218383789, 0.12773990631103516, 0.127857666015625, 0.12759859466552734, 0.12825804138183594, 0.1278167037963867, 0.12803482055664062, 0.12878028869628907, 0.128005126953125, 0.1279477767944336, 0.12773785400390625, 0.12787814331054687, 0.12846284484863282, 0.12783001708984376, 0.12776345825195312, 0.12814437866210937, 0.12812594604492186, 0.1279324188232422, 0.12761395263671876, 0.12771737670898436, 0.128427001953125, 0.12770508575439454, 0.1278883819580078, 0.12929638671875, 0.1278699493408203, 0.12793856048583985, 0.12772147369384765, 0.12795597076416015, 0.12789965057373046, 0.1279477767944336, 0.127678466796875, 0.1288642578125, 0.12818534851074218, 0.1278371810913086, 0.12756070709228515, 0.1280880584716797, 0.1277829132080078, 0.26441522216796876, 0.12775321960449218, 0.1284147186279297, 0.12843110656738282, 0.12777062225341798, 0.12808601379394532, 0.12776242828369141, 0.12883660888671875, 0.12807986450195313, 0.1278924789428711, 0.12769280242919923, 0.1283819580078125, 0.12761497497558594, 0.12829286193847655, 0.1278545913696289, 0.12783411407470704, 0.12800921630859374, 0.12785151672363282, 0.12760985565185548, 0.1278935012817383, 0.127604736328125, 0.12804197692871094, 0.12766207885742187, 0.1277276153564453, 0.12755661010742186, 0.12782284545898437, 0.12766310119628907, 0.12764876556396484, 0.1277317123413086, 0.1276006393432617, 0.12769280242919923, 0.12792729949951173, 0.12772249603271485, 0.1276211166381836, 0.1279477767944336, 0.12771942138671874, 0.12809625244140624, 0.12782080078125, 0.12788121795654298, 0.1286420440673828, 0.12810751342773438, 0.12785151672363282, 0.12791603088378906, 0.12781362915039063, 0.1283072052001953, 0.12790271759033203, 0.1278935012817383, 0.12861235046386718, 0.1277870101928711, 0.12774092864990233, 0.12795699310302736, 0.1275709457397461, 0.12798976135253906, 0.12764262390136719, 0.128, 0.12771942138671874, 0.12753817749023438, 0.12765081787109375, 0.12802047729492189, 0.12789657592773437, 0.1276610565185547, 0.12777369689941406, 0.12768972778320312, 0.2650091552734375, 0.12773990631103516, 0.1276764144897461, 0.12778598022460938, 0.1281781768798828, 0.12770611572265625, 0.12761190032958986, 0.12885093688964844, 0.12839730834960938, 0.12804608154296876, 0.12754431915283204, 0.12804301452636718, 0.128110595703125, 0.12830003356933595, 0.1278771209716797, 0.1279293441772461, 0.12791705322265626, 0.1275125732421875, 0.12755865478515624, 0.12813107299804688, 0.12789657592773437, 0.12766413116455078, 0.12822528076171874, 0.12799488067626952, 0.1279774703979492, 0.12805223083496095, 0.1276610565185547, 0.12801023864746094, 0.12790271759033203, 0.12759347534179688, 0.12863999938964843, 0.1278760986328125, 0.12768768310546874, 
0.127783935546875, 0.12769075012207032, 0.12832972717285157, 0.127678466796875, 0.12767948913574217, 0.12722994995117187, 0.1263267822265625, 0.12628173065185547, 0.1263114242553711, 0.12657766723632813, 0.12635852813720702, 0.12587315368652344, 0.12590592193603517, 0.12597657775878907, 0.12586598205566407, 0.1257543716430664, 0.1258301467895508, 0.12576563262939452, 0.12673535919189452, 0.12682342529296875, 0.12621414184570312, 0.12802662658691405, 0.12777574157714844, 0.12764569854736327, 0.12707532501220703, 0.12799078369140626, 0.12770816040039062, 0.12756787109375, 0.12807475280761718, 0.12647731018066405, 0.262550537109375, 0.1261670379638672, 0.12625305938720705, 0.12649779510498046, 0.12646707153320313, 0.12611174774169923, 0.12602777862548828, 0.12611993408203126, 0.12626739501953124, 0.12624588775634765, 0.12608512115478515, 0.12602572631835937, 0.12627763366699218, 0.12701081848144533, 0.12849766540527344, 0.12709273529052734, 0.12640460968017578, 0.1259356155395508, 0.12617932891845704, 0.12601856231689454, 0.12573184204101562, 0.1274439697265625, 0.1281474609375, 0.12596428680419922, 0.12658790588378907, 0.12630016326904298, 0.12737535858154297, 0.1271398391723633, 0.12651622772216797, 0.12802867126464842, 0.127494140625, 0.12671385955810546, 0.12621517181396485, 0.12586495971679687, 0.125949951171875, 0.12607692718505858, 0.12581990051269532, 0.1269585952758789, 0.12647628784179688, 0.1260400619506836, 0.1257011184692383, 0.1262888946533203, 0.12585062408447265, 0.1261506576538086, 0.1261619186401367, 0.12615885162353516, 0.12634521484375, 0.1262909469604492, 0.1259857940673828, 0.12619468688964844, 0.1256785888671875, 0.1258751983642578, 0.12573184204101562, 0.12563251495361327, 0.1259898910522461, 0.12623462677001954, 0.12567244720458984, 0.12609228515625, 0.12659916687011719, 0.12599807739257812, 0.12612505340576172, 0.1267558364868164, 0.12647731018066405, 0.2608926696777344, 0.12610150146484375, 0.1264158706665039, 0.12645785522460937, 0.126740478515625, 0.12682240295410158, 0.1262356491088867, 0.1261957092285156, 0.1260359649658203, 0.12633805084228517, 0.12569292449951172, 0.1258086395263672, 0.12725145721435546, 0.12759859466552734, 0.12629708862304687, 0.1263790054321289, 0.12595916748046876, 0.12631346893310547, 0.12617113494873047, 0.12567961883544923, 0.12705587005615235, 0.12577689361572267, 0.1257011184692383, 0.12581581115722656, 0.12577792358398437, 0.1261629409790039, 0.1258905563354492, 0.12567244720458984, 0.1256079330444336, 0.1280921630859375, 0.1263073272705078, 0.12712857818603515, 0.12635340881347656, 0.1265233917236328, 0.12599603271484375, 0.1261506576538086, 0.1261844482421875, 0.1280061492919922, 0.12627763366699218, 0.12625305938720705, 0.12565094757080078, 0.12626022338867188, 0.12611686706542968, 0.12614860534667968, 0.12619161224365236, 0.12624486541748048, 0.12653772735595703, 0.12628070068359376, 0.1262581787109375, 0.12623872375488282, 0.12604415893554688, 0.12649472045898438, 0.12680703735351562, 0.12633395385742188, 0.1260031967163086, 0.12610867309570312, 0.1264230422973633, 0.12621107482910157, 0.12615679931640625, 0.12624486541748048, 0.1259898910522461, 0.12594892883300782, 0.1265080337524414]",tokens/s,7.741567838438954,,,,, 
4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-14B,Qwen/Qwen-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-65b,huggyllama/llama-65b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 86.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-7b,google/gemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc32c-3fee8fe9183a38e16d155666;326a02db-ee51-49aa-b3a8-216594797809) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/gemma-7b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-7b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,i,i,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/i/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a2f10-155cd66c04caf8b3021d14b5;1910f87a-5c23-422b-b857-5c274a5ed28d) Repository Not Found for url: https://huggingface.co/i/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: i is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-6.7b,facebook/opt-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2022.883328,5539.10272,0.0,4892.655616,4542.741504,s,10,5.694402282714844,0.5694402282714843,0.0022278964552512335,0.5688670349121093,0.5707117797851562,0.5731342102050782,0.5750721545410157,"[0.575556640625, 0.5690338134765625, 0.5678692626953125, 0.5687002563476562, 0.5676268920898437, 0.5674249267578125, 0.5684844970703125, 0.5696422119140625, 0.5701734619140625, 
0.5698903198242188]",tokens/s,449.56430418883286,kWh,6.703966662839608e-06,3.6734817678128214e-06,3.077389807590622e-05,4.115134650655865e-05,tokens/kWh,6220938.601831633,MB,2022.883328,5539.10272,0.0,4892.655616,4726.280192,s,10,334.554328125,33.4554328125,0.012952548332308819,33.45466796875,33.46486484375,33.476350390625,33.485538828125,"[33.4878359375, 33.44866796875, 33.443125, 33.44916015625, 33.43737890625, 33.4588671875, 33.4546875, 33.4623125, 33.4546484375, 33.45764453125]",tokens/s,1.8831022259697452,kWh,0.00039510093122168826,0.00021654957452975094,0.0018051211817500937,0.0024167716875015325,tokens/kWh,26067.832690116305,,s,629,339.1397302856444,0.5391728621393395,0.06755475317116652,0.53097265625,0.5316083862304688,0.5317822387695312,1.09872478515625,"[0.5310126342773438, 0.53113037109375, 0.5317058715820312, 0.530935791015625, 0.5307023315429688, 0.531651611328125, 0.5313556518554687, 0.53151025390625, 0.5312081909179688, 0.5308344116210938, 0.5306572875976563, 0.531704833984375, 0.5313720092773437, 0.5314744262695312, 0.5315502319335937, 0.5319423828125, 0.5314641723632813, 0.53194140625, 0.5313074951171874, 0.53119384765625, 0.5315758056640625, 0.5314529418945313, 0.5310607299804687, 0.5313535766601563, 0.5317161254882813, 0.531689453125, 0.53146826171875, 0.531725341796875, 0.5313976440429687, 0.53174169921875, 0.5316290283203124, 0.5319515991210938, 0.5317816162109374, 0.5313218383789062, 0.5316331787109375, 0.5317826538085938, 0.5314375610351563, 0.5311324462890625, 0.5311549682617187, 0.5317427368164063, 0.5311119384765625, 0.5312276611328125, 0.5314365234375, 0.5316792602539062, 0.5316904907226563, 0.5320693969726562, 0.531862548828125, 0.5321605224609375, 0.5318092651367188, 0.5320345458984375, 0.5320376586914063, 0.531989501953125, 0.5317529296875, 0.5317816162109374, 0.532279296875, 0.531968994140625, 0.531651611328125, 0.5318492431640625, 0.5322158203125, 0.5313269653320313, 0.5311262817382812, 0.5317109985351562, 1.100517333984375, 0.531336181640625, 0.5311201171875, 0.5306900634765624, 0.5306705932617187, 0.5307289428710937, 0.5306470336914062, 0.5307658081054687, 0.530572265625, 0.5306316528320313, 0.5310679321289062, 0.5309183959960937, 0.5307955322265625, 0.5309910888671875, 0.5315430297851562, 0.531473388671875, 0.5305477294921875, 0.5311221923828126, 0.5307709350585937, 0.530966552734375, 0.5305128784179688, 0.5308323974609375, 0.5309839477539062, 0.5310494995117188, 0.5307473754882812, 0.5309757690429687, 0.5309235229492187, 0.5309337768554687, 0.53066650390625, 0.5311743774414063, 0.53091943359375, 0.5312245483398438, 0.5310628051757813, 0.5319270629882813, 0.5310371704101563, 0.5307791137695312, 0.5309276123046875, 0.530777099609375, 0.5308047485351562, 0.5308313598632812, 0.530946044921875, 0.5307647705078125, 0.5308211059570312, 0.5306654663085938, 0.530524169921875, 0.5307432861328125, 0.5308251953125, 0.53072998046875, 0.530977783203125, 0.5311867065429687, 0.5309869995117188, 0.5312542724609375, 0.5314273071289063, 0.5308969116210938, 0.5310075073242188, 0.5313853149414063, 0.5306746826171875, 0.53089892578125, 0.5307893676757812, 0.5309020385742188, 0.5307484130859375, 0.5309389038085938, 0.5306982421875, 1.0987735595703125, 0.5308344116210938, 0.530502685546875, 0.5315389404296875, 0.5309573364257812, 0.5307207641601562, 0.530703369140625, 0.531072021484375, 0.5305855712890625, 0.5308221435546875, 0.530577392578125, 0.530745361328125, 0.5306163330078125, 0.5306101684570312, 0.5306142578125, 0.5307074584960938, 0.5306920776367188, 0.530682861328125, 
0.5304330444335937, 0.5306326904296875, 0.5307760620117188, 0.5308211059570312, 0.5306358032226562, 0.53072998046875, 0.5306920776367188, 0.5310648193359375, 0.5310341186523437, 0.5307627563476562, 0.530850830078125, 0.5307586669921875, 0.5305743408203125, 0.530819091796875, 0.5308047485351562, 0.5305784301757812, 0.5307218017578125, 0.5306890258789062, 0.5307053833007812, 0.5306900634765624, 0.5304514770507812, 0.5309265747070312, 0.5308579711914062, 0.5307044067382812, 0.5306583251953125, 0.530681884765625, 0.5314078979492187, 0.5308272705078125, 0.53083544921875, 0.5314437255859376, 0.5309910888671875, 0.5313760986328125, 0.5309757690429687, 0.5308323974609375, 0.5307709350585937, 0.5311876831054687, 0.5309788208007813, 0.5310750732421875, 0.5309757690429687, 0.5311590576171875, 0.530819091796875, 0.5316198120117187, 0.5310689086914062, 0.5308856201171875, 0.530956298828125, 1.099652099609375, 0.5306685180664062, 0.5309573364257812, 0.5311047973632812, 0.5308251953125, 0.530680908203125, 0.5306971435546874, 0.5309389038085938, 0.5305805053710938, 0.5308150024414062, 0.530714599609375, 0.5305753784179688, 0.530609130859375, 0.5306388549804687, 0.5310023803710937, 0.5311734008789063, 0.5306429443359375, 0.5309942016601562, 0.5309910888671875, 0.5308006591796876, 0.5307330322265625, 0.5312122802734375, 0.5308692626953125, 0.53094091796875, 0.5309368286132813, 0.5310699462890625, 0.531135498046875, 0.5310894165039063, 0.5310156860351563, 0.5310453491210938, 0.5309798583984375, 0.5312214965820312, 0.5310709838867187, 0.5311631469726562, 0.5312348022460938, 0.5308098754882813, 0.5313177490234375, 0.5314129638671875, 0.5311539306640625, 0.5311221923828126, 0.5308692626953125, 0.5309265747070312, 0.5310023803710937, 0.5306695556640625, 0.5307924194335938, 0.5308477172851562, 0.5312583618164063, 0.5308375244140625, 0.5306859741210938, 0.5308375244140625, 0.5310238647460938, 0.5310392456054688, 0.5313167114257813, 0.5309163818359375, 0.530735107421875, 0.5309808349609375, 0.5313843383789062, 0.53096240234375, 0.530924560546875, 0.5308661499023437, 0.5307893676757812, 0.5308641357421875, 0.5307678833007813, 1.098599365234375, 0.5306071166992188, 0.5305692138671875, 0.5305211181640626, 0.53089794921875, 0.5305497436523438, 0.5308221435546875, 0.5307658081054687, 0.5307996215820312, 0.530661376953125, 0.5306900634765624, 0.5311692504882812, 0.530609130859375, 0.5308006591796876, 0.5306644287109376, 0.5308375244140625, 0.5307730102539062, 0.5307525024414063, 0.5305477294921875, 0.5310003051757812, 0.53079345703125, 0.5306757202148438, 0.5304637451171875, 0.5307637939453125, 0.5306132202148437, 0.530956298828125, 0.530988037109375, 0.5308764038085938, 0.5306808471679687, 0.5308743896484375, 0.5307279663085938, 0.5307576293945313, 0.5310965576171875, 0.5306583251953125, 0.53056103515625, 0.5305681762695312, 0.530555908203125, 0.5308845825195313, 0.5308344116210938, 0.5308221435546875, 0.530787353515625, 0.5306757202148438, 0.5306941528320313, 0.5308262329101563, 0.5310812377929688, 0.530934814453125, 0.5305538330078124, 0.5308098754882813, 0.5308231811523437, 0.5310986328125, 0.5306429443359375, 0.530845703125, 0.5307422485351563, 0.5307197265625, 0.530492431640625, 0.5308323974609375, 0.5305548706054688, 0.53087744140625, 0.5306767578125, 0.5308753662109374, 0.5305855712890625, 0.5308129272460937, 0.5303736572265625, 1.098567626953125, 0.5309030151367188, 0.5317017822265625, 0.5314866943359375, 0.531140625, 0.5313515625, 0.5313720092773437, 0.530951171875, 0.5308917846679687, 0.530724853515625, 
0.5307863159179688, 0.5310587158203125, 0.530777099609375, 0.530819091796875, 0.53087744140625, 0.5309696044921876, 0.5308897094726562, 0.5310904541015625, 0.531515380859375, 0.5310259399414062, 0.5307739868164062, 0.5312061157226563, 0.5318901977539062, 0.5310761108398437, 0.5309962158203125, 0.5314529418945313, 0.5309951782226563, 0.5310013427734375, 0.5313760986328125, 0.5313331298828124, 0.5312849731445313, 0.5316055297851563, 0.5308313598632812, 0.5314816284179688, 0.531324951171875, 0.5315655517578125, 0.5319567260742187, 0.5317283935546875, 0.5308856201171875, 0.5309910888671875, 0.5312266235351563, 0.53136279296875, 0.5310289916992188, 0.5309757690429687, 0.530524169921875, 0.5308047485351562, 0.5308436279296875, 0.5307371215820312, 0.5306552124023437, 0.5307576293945313, 0.530914306640625, 0.530629638671875, 0.530746337890625, 0.5308231811523437, 0.5308712768554688, 0.5312450561523437, 0.5309327392578125, 0.53103515625, 0.5309214477539063, 0.5310279541015624, 0.5311580200195313, 0.5310894165039063, 0.5313126220703125, 1.0993970947265626, 0.5308150024414062, 0.53054052734375, 0.53062451171875, 0.5311447143554687, 0.5309869995117188, 0.5307893676757812, 0.5311334228515625, 0.5309542236328125, 0.5308313598632812, 0.5310842895507812, 0.5308108520507813, 0.5306603393554687, 0.531177490234375, 0.5307023315429688, 0.5314396362304687, 0.5306491088867188, 0.5312952270507812, 0.5308108520507813, 0.530840576171875, 0.5308078002929687, 0.5307863159179688, 0.5308047485351562, 0.5311262817382812, 0.53103515625, 0.5310740356445313, 0.5309634399414063, 0.531051513671875, 0.5307965698242187, 0.5314877319335938, 0.530746337890625, 0.5310259399414062, 0.5311201171875, 0.5307340698242188, 0.5309931640625, 0.5309593505859375, 0.5321942749023437, 0.5316433715820312, 0.5312388916015625, 0.530872314453125, 0.5307698974609375, 0.5307975463867187, 0.53082421875, 0.5306982421875, 0.5306019897460937, 0.5307197265625, 0.5305917358398438, 0.5307258911132813, 0.53085693359375, 0.531398681640625, 0.53166796875, 0.5317969970703125, 0.531862548828125, 0.5320745239257813, 0.5316608276367187, 0.5313966064453125, 0.5307791137695312, 0.53075048828125, 0.5309102172851563, 0.5311057739257813, 0.5310842895507812, 0.5312010498046875, 0.5308037109375, 1.1007191162109375, 0.53106689453125, 0.5309931640625, 0.5313853149414063, 0.5308897094726562, 0.5309972534179688, 0.5310842895507812, 0.531324951171875, 0.5306767578125, 0.5316229248046875, 0.5309573364257812, 0.530756591796875, 0.5307310180664062, 0.53082421875, 0.5305927734375, 0.5316853637695312, 0.5315819702148438, 0.5312542724609375, 0.5311344604492187, 0.531040283203125, 0.5308897094726562, 0.5313392944335937, 0.5316823120117188, 0.53125634765625, 0.5309573364257812, 0.5317652587890624, 0.531435546875, 0.531162109375, 0.5312890625, 0.5313095703125, 0.5313320922851562, 0.5313013916015625, 0.5311661987304688, 0.531324951171875, 0.5308743896484375, 0.53102490234375, 0.5308999633789062, 0.53098291015625, 0.5310023803710937, 0.5311641845703124, 0.5315082397460937, 0.5309317016601562, 0.5308917846679687, 0.5309010009765625, 0.5306695556640625, 0.531409912109375, 0.5308856201171875, 0.531504150390625, 0.5308948364257813, 0.5309337768554687, 0.5309696044921876, 0.5308682250976563, 0.5308712768554688, 0.5310637817382813, 0.5310596923828125, 0.530924560546875, 0.5309398803710937, 0.5311754150390625, 0.5311918334960938, 0.5315389404296875, 0.5313822631835937, 0.5316004028320312, 0.5323612060546875, 1.101580322265625, 0.5309798583984375, 0.5314283447265625, 0.53097265625, 
0.5306849365234375, 0.53102490234375, 0.5308344116210938, 0.5312901000976562, 0.5309429931640625, 0.5312133178710937, 0.53045556640625, 0.5307914428710937, 0.53096142578125, 0.5311590576171875, 0.531051513671875, 0.5311539306640625, 0.5308795166015625, 0.5314437255859376, 0.5311702880859375, 0.5311856689453125, 0.5311795043945312, 0.5310105590820312, 0.5306644287109376, 0.530904052734375, 0.5307525024414063, 0.530788330078125, 0.5305599975585937, 0.5308651733398437, 0.5308682250976563, 0.5307955322265625, 0.5306275634765625, 0.5308323974609375, 0.53082421875, 0.5314150390625, 0.5311631469726562, 0.53131982421875, 0.5312214965820312, 0.5309327392578125, 0.5311282958984375, 0.5312542724609375, 0.530845703125, 0.5310156860351563, 0.53068798828125, 0.5308897094726562, 0.5308231811523437, 0.5310628051757813, 0.531072021484375, 0.5317027587890625, 0.5310494995117188, 0.5310187377929687, 0.5309296875, 0.531330078125, 0.5311447143554687, 0.53096044921875, 0.5310719604492188, 0.531030029296875, 0.5310156860351563, 0.53125634765625, 0.5312440185546875, 0.5312337646484375, 0.5311641845703124, 0.5312235717773437, 0.530788330078125, 1.10097412109375, 0.5311385498046876, 0.5308712768554688, 0.5309481201171875, 0.5307709350585937, 0.5306480712890626, 0.5311713256835937, 0.5311160278320313, 0.5307095336914063, 0.5308487548828125, 0.53075048828125, 0.5309020385742188, 0.5306849365234375, 0.5309808349609375, 0.53065625, 0.5309685668945312, 0.5306695556640625, 0.5307422485351563, 0.5307944946289063, 0.53087744140625, 0.53075048828125, 0.5307781372070313, 0.5308733520507812, 0.5309255981445312, 0.5307822265625, 0.531041259765625, 0.5312808837890625, 0.5311181030273437, 0.531198974609375, 0.5309603881835937, 0.5314375610351563, 0.5310955810546875, 0.5312604370117188, 0.5309583129882812, 0.5311651611328125, 0.530998291015625, 0.53083544921875, 0.5311876831054687, 0.531009521484375, 0.531167236328125, 0.5306757202148438, 0.531293212890625, 0.5311754150390625, 0.53102490234375, 0.5311047973632812, 0.5315072021484375, 0.531167236328125, 0.5316218872070313, 0.5310310668945313, 0.531662841796875, 0.5310525512695312, 0.5312481079101562, 0.5310341186523437, 0.5309962158203125, 0.5309798583984375, 0.5312184448242188, 0.5310965576171875, 0.5310013427734375, 0.5311498413085938, 0.53165771484375, 0.5311181030273437, 0.531272705078125, 0.5313116455078125]",tokens/s,1.8546927529553001,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-34B,01-ai/Yi-34B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.0,,,,,,MB,1767.067648,22129.672192,0.0,21483.225088,20799.168,s,10,28.05056616210938,2.805056616210938,0.0027095714336187707,2.804907958984375,2.8082408447265625,2.808313903808594,2.808372351074219,"[2.805282470703125, 2.808224609375, 2.803196533203125, 2.80234619140625, 2.80277294921875, 2.800419677734375, 2.804533447265625, 2.80722705078125, 2.80817626953125, 
2.808386962890625]",tokens/s,91.26375507735885,kWh,3.3081144508388305e-05,1.8129723654910774e-05,0.00015725384802519894,0.000208464716188498,tokens/kWh,1228025.5607789264,MB,1772.261376,22129.672192,0.0,21483.225088,20902.144,s,10,1667.162484375,166.71624843749996,0.015085835868376713,166.71732031250002,166.73279531249997,166.73349140624998,166.73404828124998,"[166.730125, 166.715734375, 166.68440625, 166.724703125, 166.7341875, 166.71721875, 166.708265625, 166.717421875, 166.69778125, 166.732640625]",tokens/s,0.377887581987055,kWh,0.0019678901955319777,0.0010785770549249355,0.009270038554913802,0.012316505805370715,tokens/kWh,5115.087103074991,,s,629,1689.7617126464822,2.686425616290118,0.33327430639158717,2.64620947265625,2.6474256835937497,2.647894970703125,5.4507228125000005,"[2.646453369140625, 2.646373291015625, 2.64631591796875, 2.6454580078125, 2.646531982421875, 2.647617431640625, 2.64826171875, 2.647658447265625, 2.647793701171875, 2.64637646484375, 2.64711376953125, 2.64620947265625, 2.646602783203125, 2.646426513671875, 2.64635498046875, 2.64557568359375, 2.644958251953125, 2.647402587890625, 2.64601904296875, 2.646287353515625, 2.647202880859375, 2.64688525390625, 2.646246337890625, 2.646867919921875, 2.64715478515625, 2.647743408203125, 2.646212646484375, 2.647287841796875, 2.648404052734375, 2.64638671875, 2.64559912109375, 2.647835693359375, 2.648161376953125, 2.6474189453125, 2.647456787109375, 2.6468076171875, 2.646687744140625, 2.64502880859375, 2.646285400390625, 2.646274169921875, 2.64652490234375, 2.64616455078125, 2.64710546875, 2.6460908203125, 2.646740966796875, 2.645139404296875, 2.646036376953125, 2.646411376953125, 2.64627099609375, 2.64452197265625, 2.64525830078125, 2.647901123046875, 2.64601806640625, 2.645980224609375, 2.64719677734375, 2.64639990234375, 2.64538720703125, 2.645042236328125, 2.64540576171875, 2.646120361328125, 2.646118408203125, 2.64612353515625, 5.45506494140625, 2.64526025390625, 2.6462626953125, 2.64690576171875, 2.646042724609375, 2.64546826171875, 2.646393798828125, 2.647192626953125, 2.644919189453125, 2.644935791015625, 2.645579833984375, 2.647086181640625, 2.646035400390625, 2.644955078125, 2.645644287109375, 2.645537841796875, 2.644991943359375, 2.645560302734375, 2.646414306640625, 2.645199951171875, 2.646506591796875, 2.646548583984375, 2.64702880859375, 2.64570166015625, 2.644788330078125, 2.64700732421875, 2.646330322265625, 2.64543017578125, 2.645243896484375, 2.646043701171875, 2.645831787109375, 2.6459453125, 2.64614697265625, 2.64631494140625, 2.64669091796875, 2.646116455078125, 2.647741455078125, 2.6478857421875, 2.645671875, 2.647362548828125, 2.646822998046875, 2.648393798828125, 2.647374755859375, 2.64627294921875, 2.646519775390625, 2.645937255859375, 2.6523720703125, 2.647458740234375, 2.646916015625, 2.6467646484375, 2.644612060546875, 2.645245849609375, 2.64698779296875, 2.646232177734375, 2.645567626953125, 2.645909423828125, 2.64679541015625, 2.64620947265625, 2.645465087890625, 2.646067138671875, 2.6467060546875, 2.6458798828125, 2.6457109375, 5.4513837890625, 2.647235595703125, 2.645803955078125, 2.64654443359375, 2.64496533203125, 2.64464892578125, 2.645158935546875, 2.646266845703125, 2.64560546875, 2.646096923828125, 2.64475537109375, 2.64629052734375, 2.644760498046875, 2.64480859375, 2.6463427734375, 2.64760009765625, 2.645761962890625, 2.645760986328125, 2.646381591796875, 2.646036376953125, 2.64513134765625, 2.646679443359375, 2.6458369140625, 2.6495498046875, 2.646917236328125, 2.644770751953125, 
2.645843017578125, 2.644592529296875, 2.6452919921875, 2.6457333984375, 2.64617578125, 2.64542822265625, 2.645474365234375, 2.644977783203125, 2.6475703125, 2.645327880859375, 2.64601611328125, 2.64604052734375, 2.644948974609375, 2.645347412109375, 2.64539453125, 2.64572314453125, 2.645024658203125, 2.64540771484375, 2.64523681640625, 2.64541796875, 2.644529052734375, 2.649079833984375, 2.645666748046875, 2.646816650390625, 2.645528564453125, 2.645729248046875, 2.6472724609375, 2.646464599609375, 2.644356201171875, 2.644589599609375, 2.64574267578125, 2.645088134765625, 2.6443017578125, 2.644094970703125, 2.64595556640625, 2.645572509765625, 2.644812744140625, 5.4503955078125, 2.645255126953125, 2.645544921875, 2.64656884765625, 2.648037353515625, 2.645263427734375, 2.64638671875, 2.6460732421875, 2.6460478515625, 2.645971923828125, 2.645147705078125, 2.646581298828125, 2.6468291015625, 2.64502880859375, 2.645927978515625, 2.64683935546875, 2.64574365234375, 2.645297119140625, 2.64578662109375, 2.645689453125, 2.64529296875, 2.645930908203125, 2.64618798828125, 2.64557373046875, 2.645583984375, 2.64784375, 2.6465341796875, 2.645923828125, 2.648280029296875, 2.647232421875, 2.64726220703125, 2.646688720703125, 2.646456298828125, 2.647128173828125, 2.64785009765625, 2.6451201171875, 2.645867431640625, 2.645412841796875, 2.647185302734375, 2.644790283203125, 2.645506103515625, 2.64781005859375, 2.6459228515625, 2.64651171875, 2.6510693359375, 2.647773193359375, 2.6475908203125, 2.648330322265625, 2.646329345703125, 2.64608349609375, 2.64578759765625, 2.64604150390625, 2.646233154296875, 2.647012451171875, 2.644991943359375, 2.6461533203125, 2.646445068359375, 2.6459423828125, 2.646255615234375, 2.647103515625, 2.64721826171875, 2.6459013671875, 2.646496337890625, 5.45085009765625, 2.646131591796875, 2.647123046875, 2.64625244140625, 2.6469755859375, 2.645900146484375, 2.64694384765625, 2.6482001953125, 2.647193603515625, 2.646340576171875, 2.646576171875, 2.647742431640625, 2.645885986328125, 2.64563916015625, 2.645662841796875, 2.646833251953125, 2.64673291015625, 2.648217529296875, 2.645130126953125, 2.648642578125, 2.64700732421875, 2.6464482421875, 2.645592041015625, 2.646584228515625, 2.64501953125, 2.64555224609375, 2.64648388671875, 2.6452490234375, 2.646088623046875, 2.64539453125, 2.646591552734375, 2.645818359375, 2.6455, 2.645107666015625, 2.647025634765625, 2.64500830078125, 2.647201904296875, 2.6471865234375, 2.647762939453125, 2.6468916015625, 2.646411376953125, 2.645792724609375, 2.647160888671875, 2.64576611328125, 2.645645263671875, 2.646044677734375, 2.647015380859375, 2.64671435546875, 2.645159912109375, 2.646703125, 2.646371337890625, 2.646978515625, 2.6460908203125, 2.648290283203125, 2.646096923828125, 2.646834228515625, 2.6468515625, 2.654116943359375, 2.646571044921875, 2.646265869140625, 2.647033935546875, 2.645507080078125, 2.64506884765625, 5.44818896484375, 2.646950927734375, 2.64728466796875, 2.64570068359375, 2.647045166015625, 2.645780517578125, 2.64650341796875, 2.647185302734375, 2.647396240234375, 2.64726416015625, 2.645887939453125, 2.646220947265625, 2.646308837890625, 2.6452724609375, 2.645916748046875, 2.6456513671875, 2.64530029296875, 2.645833740234375, 2.64532275390625, 2.646921142578125, 2.645572509765625, 2.646182861328125, 2.646026123046875, 2.646593505859375, 2.646531005859375, 2.646508544921875, 2.646131591796875, 2.647275634765625, 2.646612060546875, 2.646274169921875, 2.645583984375, 2.646960205078125, 2.647244873046875, 2.6451845703125, 
2.6467060546875, 2.646052001953125, 2.64616650390625, 2.645667724609375, 2.652001220703125, 2.64635693359375, 2.645919677734375, 2.646162353515625, 2.64641845703125, 2.645307373046875, 2.644989013671875, 2.646035400390625, 2.645531494140625, 2.645157958984375, 2.645906494140625, 2.645951416015625, 2.646036376953125, 2.645796875, 2.64467041015625, 2.645960693359375, 2.64549267578125, 2.64652294921875, 2.64587255859375, 2.647626708984375, 2.646477783203125, 2.646447021484375, 2.646128662109375, 2.64745263671875, 2.646246337890625, 5.45296484375, 2.646928466796875, 2.647333984375, 2.646259765625, 2.64629443359375, 2.64618701171875, 2.6448466796875, 2.646443115234375, 2.646686767578125, 2.6447626953125, 2.64540576171875, 2.64494287109375, 2.645835693359375, 2.645675048828125, 2.64589404296875, 2.64648095703125, 2.64646044921875, 2.646432861328125, 2.646467529296875, 2.645669921875, 2.64584814453125, 2.6454580078125, 2.64523486328125, 2.650271728515625, 2.647341064453125, 2.64589306640625, 2.64591064453125, 2.64768505859375, 2.64700830078125, 2.64631201171875, 2.645865478515625, 2.647047119140625, 2.646507568359375, 2.64665087890625, 2.647560302734375, 2.645760986328125, 2.645769287109375, 2.645876708984375, 2.645303466796875, 2.646839111328125, 2.646023193359375, 2.645792724609375, 2.64610400390625, 2.646148193359375, 2.646067138671875, 2.64551416015625, 2.645665771484375, 2.64494482421875, 2.645905517578125, 2.645917724609375, 2.645688232421875, 2.64422705078125, 2.64477587890625, 2.64602001953125, 2.646118408203125, 2.645832763671875, 2.64635888671875, 2.646981689453125, 2.645694580078125, 2.6462197265625, 2.6456484375, 2.647057373046875, 2.646053955078125, 5.45440771484375, 2.64606103515625, 2.64624853515625, 2.645525390625, 2.6462197265625, 2.646288330078125, 2.64601806640625, 2.6458369140625, 2.6465341796875, 2.64682080078125, 2.6460498046875, 2.645517333984375, 2.646379638671875, 2.645887939453125, 2.645572509765625, 2.645886962890625, 2.6497626953125, 2.645772216796875, 2.64593603515625, 2.64641748046875, 2.64673681640625, 2.645414794921875, 2.644895751953125, 2.6456689453125, 2.646624267578125, 2.64646142578125, 2.645294921875, 2.646984619140625, 2.646635498046875, 2.64559619140625, 2.644748291015625, 2.64690576171875, 2.64658642578125, 2.645821533203125, 2.6462392578125, 2.647307373046875, 2.64610205078125, 2.647371826171875, 2.64780078125, 2.64745263671875, 2.646867919921875, 2.64559814453125, 2.646322265625, 2.6461328125, 2.6461328125, 2.6467666015625, 2.64734619140625, 2.647150634765625, 2.6465341796875, 2.646138916015625, 2.645937255859375, 2.64439697265625, 2.645222412109375, 2.646958984375, 2.646992919921875, 2.647083984375, 2.64567822265625, 2.646795166015625, 2.645927978515625, 2.646295654296875, 2.64530224609375, 2.646948974609375, 2.646352783203125, 5.45413623046875, 2.646625244140625, 2.6464501953125, 2.647415771484375, 2.64749462890625, 2.64658837890625, 2.645314453125, 2.647509033203125, 2.647132080078125, 2.64675537109375, 2.646547607421875, 2.646182861328125, 2.64610302734375, 2.6463896484375, 2.646077392578125, 2.64599853515625, 2.646762451171875, 2.64707080078125, 2.646077392578125, 2.64540576171875, 2.646277099609375, 2.64477392578125, 2.645159912109375, 2.6459638671875, 2.64671240234375, 2.646478759765625, 2.645445556640625, 2.646115234375, 2.646445068359375, 2.645536865234375, 2.64454443359375, 2.646118408203125, 2.645505126953125, 2.644704345703125, 2.6460517578125, 2.64701953125, 2.645275634765625, 2.64492431640625, 2.645380126953125, 2.645751708984375, 
2.64485888671875, 2.64445654296875, 2.645286865234375, 2.646550537109375, 2.646221923828125, 2.645315673828125, 2.6454169921875, 2.64634375, 2.645382080078125, 2.6473984375, 2.646042724609375, 2.64547119140625, 2.644828125, 2.64538720703125, 2.646898681640625, 2.646388671875, 2.64596484375, 2.6477158203125, 2.64626171875, 2.6452685546875, 2.645350341796875, 2.6460498046875, 2.644905029296875, 5.4557060546875, 2.64730419921875, 2.64740966796875, 2.646350830078125, 2.645677978515625, 2.6465546875, 2.64831689453125, 2.64722119140625, 2.646279052734375, 2.645445556640625, 2.646408203125, 2.646245361328125, 2.646921142578125, 2.64698974609375, 2.646667236328125, 2.646288330078125, 2.64876953125, 2.647458740234375, 2.645729248046875, 2.645494873046875, 2.646150146484375, 2.6453955078125, 2.6452890625, 2.6467861328125, 2.647785400390625, 2.646699951171875, 2.65012744140625, 2.64613671875, 2.646697998046875, 2.6466845703125, 2.646642578125, 2.6476943359375, 2.64681787109375, 2.645788818359375, 2.646158203125, 2.647160888671875, 2.645675048828125, 2.64652490234375, 2.64439306640625, 2.64669189453125, 2.646427734375, 2.64625146484375, 2.646435791015625, 2.645409912109375, 2.646538330078125, 2.64618505859375, 2.6468310546875, 2.647814208984375, 2.645505126953125, 2.645622802734375, 2.6466826171875, 2.646696044921875, 2.6457548828125, 2.6469130859375, 2.6469990234375, 2.64591259765625, 2.64690185546875, 2.64622705078125, 2.64720703125, 2.646657958984375, 2.645370849609375, 2.645916748046875, 2.646266845703125]",tokens/s,0.3722418346281902,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-160m,EleutherAI/pythia-160m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1303.977984,1030.22592,0.0,383.778816,312.280064,s,10,0.30782016181945804,0.030782016181945804,0.001617235684294077,0.030563695907592774,0.031164259910583494,0.03323989057540893,0.03490039510726929,"[0.03531552124023438, 0.03054502487182617, 0.030670495986938478, 0.029116479873657227, 0.030544319152832032, 0.02917731285095215, 0.030620607376098632, 0.030559423446655274, 0.030567968368530273, 0.030703008651733397]",tokens/s,8316.544260351227,kWh,3.557098667864181e-07,1.9491237936146868e-07,8.339715507108577e-07,1.3845937968587447e-06,tokens/kWh,184891771.5656334,MB,1303.977984,1030.22592,0.0,383.778816,347.090432,s,10,18.765849975585937,1.8765849975585938,0.01629298714952931,1.8788928833007814,1.8852921142578125,1.8978253173828123,1.9078518798828124,"[1.9103585205078124, 1.8825069580078124, 1.879499267578125, 1.8580986328125, 1.8786011962890625, 1.8434554443359374, 1.8807445068359374, 1.8754886474609376, 1.8791845703125, 1.8779122314453125]",tokens/s,33.5716208335684,kWh,2.201072483317281e-05,1.2059537590589184e-05,4.825437327548859e-05,8.232463569925058e-05,tokens/kWh,765263.0280703872,,s,629,19.00581683158875,0.030215925010474952,0.003670631507539437,0.02977996826171875,0.03002470397949219,0.03029995498657227,0.05998026794433601,"[0.03094118309020996, 0.03138150405883789, 0.03138355255126953, 
0.031524864196777344, 0.030980096817016602, 0.03207884979248047, 0.032363521575927735, 0.03149004745483398, 0.03120844841003418, 0.031268863677978515, 0.03084492874145508, 0.032194561004638675, 0.031160320281982422, 0.03057868766784668, 0.030236671447753907, 0.03517747116088867, 0.030717952728271485, 0.030108671188354492, 0.03002470397949219, 0.029833215713500977, 0.029899776458740233, 0.02977177619934082, 0.02977791976928711, 0.02977382469177246, 0.030176256179809572, 0.0299683837890625, 0.029740032196044923, 0.02977894401550293, 0.029844480514526366, 0.029732864379882814, 0.02974412727355957, 0.029797376632690428, 0.029791231155395507, 0.029726720809936522, 0.029648895263671874, 0.029749248504638674, 0.02977791976928711, 0.029949951171875, 0.02977996826171875, 0.02976255989074707, 0.02981990432739258, 0.029797376632690428, 0.0297728328704834, 0.02997039985656738, 0.029839359283447265, 0.029833215713500977, 0.029870080947875976, 0.02978508758544922, 0.02968780708312988, 0.029784063339233398, 0.02979327964782715, 0.029755392074584962, 0.02972774314880371, 0.029976640701293945, 0.02982700729370117, 0.029852672576904295, 0.029896703720092774, 0.029731840133666993, 0.029747200012207032, 0.02983628845214844, 0.029772800445556642, 0.029739007949829102, 0.06072115325927734, 0.029829120635986327, 0.02977689552307129, 0.029834239959716798, 0.030591999053955078, 0.03021414375305176, 0.029755392074584962, 0.02983628845214844, 0.02980352020263672, 0.029842432022094727, 0.029707263946533204, 0.029665279388427734, 0.029698047637939453, 0.02979430389404297, 0.029593599319458007, 0.02993152046203613, 0.0301527042388916, 0.029792255401611328, 0.029764608383178712, 0.029861888885498046, 0.03102822494506836, 0.030027776718139648, 0.02980147171020508, 0.030087167739868165, 0.02975129508972168, 0.030086143493652344, 0.029826047897338868, 0.02974617576599121, 0.030042112350463866, 0.029730815887451172, 0.029899776458740233, 0.029838336944580077, 0.02994790458679199, 0.02980659294128418, 0.02976972770690918, 0.029682687759399414, 0.02978816032409668, 0.029817855834960938, 0.029734912872314452, 0.029831167221069335, 0.02974208068847656, 0.02978508758544922, 0.029848575592041016, 0.029800447463989257, 0.029785120010375976, 0.029689823150634766, 0.03014553642272949, 0.029886463165283202, 0.03018035125732422, 0.029938688278198244, 0.02973695945739746, 0.030068735122680663, 0.02973695945739746, 0.029897727966308595, 0.0297891845703125, 0.0298024959564209, 0.02995814323425293, 0.02976051139831543, 0.02976870346069336, 0.02997555160522461, 0.029975584030151367, 0.029692895889282228, 0.029708288192749024, 0.061895679473876954, 0.030073856353759764, 0.02974412727355957, 0.029680639266967773, 0.029848575592041016, 0.029882368087768556, 0.029543424606323244, 0.02978713607788086, 0.029755392074584962, 0.029875200271606447, 0.029856767654418945, 0.029831167221069335, 0.029770751953125, 0.02977791976928711, 0.02973695945739746, 0.029897727966308595, 0.030079999923706056, 0.029894655227661132, 0.029703168869018554, 0.0297574405670166, 0.029837312698364257, 0.029813760757446288, 0.029814783096313476, 0.029743167877197267, 0.029764543533325194, 0.029707263946533204, 0.02977791976928711, 0.029730815887451172, 0.030044160842895507, 0.029833215713500977, 0.02974617576599121, 0.029815807342529296, 0.029740032196044923, 0.029815807342529296, 0.029816831588745117, 0.029799423217773437, 0.029713407516479492, 0.0297891845703125, 0.02983628845214844, 0.029896703720092774, 0.029834239959716798, 0.0297574405670166, 0.029868032455444334, 
0.029748224258422853, 0.029849599838256836, 0.02977382469177246, 0.02986604881286621, 0.029807552337646485, 0.030026752471923827, 0.02976255989074707, 0.02976972770690918, 0.029892608642578124, 0.029701120376586915, 0.02976870346069336, 0.02976255989074707, 0.029797376632690428, 0.02977177619934082, 0.029867008209228517, 0.029808639526367187, 0.02974515151977539, 0.029740032196044923, 0.029821952819824218, 0.030018560409545897, 0.05799935913085937, 0.028490751266479493, 0.028519424438476562, 0.028368896484375, 0.028404735565185548, 0.02834022331237793, 0.028285951614379884, 0.02834739112854004, 0.028308479309082032, 0.028387327194213868, 0.028321792602539062, 0.028298240661621094, 0.028923904418945313, 0.031113216400146484, 0.030136320114135744, 0.02981990432739258, 0.029868032455444334, 0.029728832244873046, 0.02970515251159668, 0.029386751174926756, 0.0297523193359375, 0.029822975158691405, 0.02971238327026367, 0.02978508758544922, 0.02980352020263672, 0.02977484893798828, 0.029677600860595704, 0.029713375091552734, 0.029651968002319336, 0.02973798370361328, 0.029881343841552735, 0.029772800445556642, 0.02979532814025879, 0.029672447204589843, 0.029861888885498046, 0.029646848678588866, 0.029551616668701174, 0.029764608383178712, 0.02973695945739746, 0.029662208557128908, 0.02970419120788574, 0.029772800445556642, 0.02975436782836914, 0.02972876739501953, 0.02979430389404297, 0.029699071884155274, 0.02981888008117676, 0.029753376007080078, 0.02983011245727539, 0.029718528747558592, 0.029489152908325194, 0.029640703201293944, 0.029736991882324218, 0.02938057518005371, 0.029748224258422853, 0.029775871276855468, 0.02980659294128418, 0.029814783096313476, 0.029716512680053712, 0.029680608749389648, 0.02979635238647461, 0.029783039093017577, 0.02978816032409668, 0.06087680053710937, 0.029916160583496092, 0.030342144012451173, 0.029867008209228517, 0.029744159698486327, 0.029726688385009765, 0.02975846481323242, 0.029857791900634766, 0.0297891845703125, 0.029784063339233398, 0.02979020881652832, 0.029849599838256836, 0.029449216842651366, 0.02939084815979004, 0.029541376113891602, 0.029874176025390626, 0.029816831588745117, 0.029724672317504884, 0.029915136337280275, 0.029921279907226563, 0.029829120635986327, 0.029734912872314452, 0.02981888008117676, 0.02980147171020508, 0.029860864639282225, 0.029874176025390626, 0.029817855834960938, 0.029855743408203125, 0.029718528747558592, 0.029854719161987304, 0.029833215713500977, 0.02995199966430664, 0.02972979164123535, 0.02980147171020508, 0.029864959716796875, 0.030071807861328126, 0.029874208450317383, 0.02984239959716797, 0.02975027275085449, 0.02978508758544922, 0.029832191467285156, 0.029860864639282225, 0.02976255989074707, 0.029702144622802733, 0.029920255661010742, 0.02975948715209961, 0.02994790458679199, 0.02977382469177246, 0.02973388862609863, 0.029643775939941407, 0.029703168869018554, 0.029852672576904295, 0.029839359283447265, 0.02983628845214844, 0.029784063339233398, 0.029769792556762695, 0.029781951904296874, 0.02998784065246582, 0.02981068801879883, 0.02998784065246582, 0.0297574405670166, 0.029809696197509766, 0.029711328506469726, 0.05807513427734375, 0.02834329605102539, 0.028271615982055662, 0.02834329605102539, 0.028402687072753906, 0.028421119689941408, 0.028279808044433592, 0.028318719863891603, 0.028318719863891603, 0.02834022331237793, 0.02835148811340332, 0.02838118362426758, 0.02836172866821289, 0.028310527801513673, 0.02832793617248535, 0.02815795135498047, 0.028197887420654297, 0.02831667137145996, 
0.028222463607788087, 0.028233728408813476, 0.028296192169189452, 0.028251136779785156, 0.028318719863891603, 0.02973798370361328, 0.030136320114135744, 0.029817855834960938, 0.029731840133666993, 0.02978099250793457, 0.02972979164123535, 0.029930496215820314, 0.029886463165283202, 0.029709312438964845, 0.029739007949829102, 0.029848575592041016, 0.02972774314880371, 0.02976870346069336, 0.029648895263671874, 0.029732864379882814, 0.029813760757446288, 0.029820928573608397, 0.029755392074584962, 0.029713407516479492, 0.029643775939941407, 0.02977996826171875, 0.029856767654418945, 0.029802528381347657, 0.029723615646362306, 0.029940736770629882, 0.029823007583618163, 0.029761503219604492, 0.02976972770690918, 0.029691904067993165, 0.02968783950805664, 0.02980246353149414, 0.029711360931396483, 0.029895679473876953, 0.029684736251831056, 0.029894655227661132, 0.029791231155395507, 0.029692928314208986, 0.029740032196044923, 0.029820928573608397, 0.030217216491699218, 0.06100377655029297, 0.029852672576904295, 0.02978508758544922, 0.029661184310913087, 0.029660160064697266, 0.02972368049621582, 0.029649887084960937, 0.02981171226501465, 0.02997555160522461, 0.02975846481323242, 0.02975846481323242, 0.029682687759399414, 0.029800447463989257, 0.02975129508972168, 0.030038015365600586, 0.02983526420593262, 0.02981888008117676, 0.029861888885498046, 0.029864959716796875, 0.029840383529663086, 0.030003200531005858, 0.0307906551361084, 0.03020697593688965, 0.029899776458740233, 0.02975846481323242, 0.02978508758544922, 0.029917184829711913, 0.02975129508972168, 0.029702144622802733, 0.02978713607788086, 0.02976972770690918, 0.02976665687561035, 0.029708288192749024, 0.029823999404907226, 0.029826047897338868, 0.02983628845214844, 0.03013222312927246, 0.03099545669555664, 0.030038015365600586, 0.029878271102905272, 0.029697023391723632, 0.02977791976928711, 0.029929471969604493, 0.029862911224365234, 0.029932544708251952, 0.029714431762695313, 0.02996940803527832, 0.02980659294128418, 0.02976051139831543, 0.02951372718811035, 0.029684736251831056, 0.029820928573608397, 0.02977996826171875, 0.029778976440429688, 0.029754335403442383, 0.029816831588745117, 0.02976563262939453, 0.029755456924438477, 0.02987615966796875, 0.029709312438964845, 0.029723743438720703, 0.029749151229858398, 0.02977382469177246, 0.06091059112548828, 0.03012403106689453, 0.0297574405670166, 0.029799423217773437, 0.02974515151977539, 0.02982809638977051, 0.029861888885498046, 0.029743104934692382, 0.02978201675415039, 0.029713407516479492, 0.029759519577026366, 0.029883359909057616, 0.02977484893798828, 0.02979532814025879, 0.029874176025390626, 0.029875200271606447, 0.029850624084472657, 0.02984351921081543, 0.02972358322143555, 0.029660160064697266, 0.02992742347717285, 0.02974515151977539, 0.02968780708312988, 0.029831167221069335, 0.029741056442260744, 0.029732864379882814, 0.029642751693725586, 0.029642751693725586, 0.029895679473876953, 0.029628416061401368, 0.029723648071289063, 0.029618175506591796, 0.029715456008911133, 0.029864959716796875, 0.029732864379882814, 0.02978201675415039, 0.029748224258422853, 0.029813760757446288, 0.02982707214355469, 0.0297574405670166, 0.029854719161987304, 0.02972876739501953, 0.029657087326049804, 0.029668352127075196, 0.029611007690429687, 0.029412351608276367, 0.02972876739501953, 0.029885440826416015, 0.029694976806640624, 0.02976870346069336, 0.029775871276855468, 0.02993152046203613, 0.02977894401550293, 0.029700096130371095, 0.02977382469177246, 0.029710336685180663, 
0.029688831329345702, 0.029684736251831056, 0.029700096130371095, 0.02970419120788574, 0.029667327880859375, 0.029730815887451172, 0.029863935470581054, 0.060827648162841794, 0.02976563262939453, 0.029716480255126954, 0.029770751953125, 0.02973388862609863, 0.02977689552307129, 0.02975846481323242, 0.029849599838256836, 0.0298024959564209, 0.029850624084472657, 0.029688831329345702, 0.029731840133666993, 0.029740032196044923, 0.029731840133666993, 0.029859840393066408, 0.02975027275085449, 0.029707263946533204, 0.029718528747558592, 0.02973388862609863, 0.029772800445556642, 0.029853696823120116, 0.02976563262939453, 0.029611007690429687, 0.02977996826171875, 0.02979430389404297, 0.029807615280151366, 0.029343807220458984, 0.029495231628417967, 0.029894655227661132, 0.03171123123168945, 0.030046207427978516, 0.03002470397949219, 0.029706239700317383, 0.02974412727355957, 0.029679616928100585, 0.029911039352416992, 0.02976051139831543, 0.029823999404907226, 0.029944831848144532, 0.02991926383972168, 0.029790176391601562, 0.02977382469177246, 0.029692928314208986, 0.029726720809936522, 0.029773855209350587, 0.029737951278686524, 0.02981990432739258, 0.029906944274902345, 0.029869056701660155, 0.029907968521118163, 0.029767679214477538, 0.02978201675415039, 0.02968780708312988, 0.02976563262939453, 0.029872127532958984, 0.02996428871154785, 0.02977689552307129, 0.02996326446533203, 0.029944831848144532, 0.029889568328857422, 0.029706207275390625, 0.029854719161987304, 0.029902847290039062, 0.06100787353515625, 0.029770816802978516, 0.029812671661376952, 0.02975027275085449, 0.029656063079833983, 0.029666303634643554, 0.02972979164123535, 0.0297256965637207, 0.029929471969604493, 0.029775871276855468, 0.029690879821777344, 0.029693952560424806, 0.02958028793334961, 0.029640703201293944, 0.02977996826171875, 0.029739007949829102, 0.03000115203857422, 0.029897727966308595, 0.029840383529663086, 0.029814783096313476, 0.0297523193359375, 0.029708288192749024, 0.02975027275085449, 0.02981068801879883, 0.029732864379882814, 0.029838336944580077, 0.029718528747558592, 0.02970419120788574, 0.0297205753326416, 0.029740032196044923, 0.02974617576599121, 0.02969599914550781, 0.029757503509521485, 0.0298853759765625, 0.02979840087890625, 0.029700096130371095, 0.029752351760864257, 0.02980656051635742, 0.02973695945739746, 0.029663232803344725, 0.02972159957885742, 0.030027776718139648, 0.029971519470214845, 0.029904832839965822, 0.029829120635986327, 0.02974412727355957, 0.030027776718139648, 0.029853696823120116, 0.029837312698364257, 0.029840383529663086, 0.029850624084472657, 0.02986911964416504, 0.030047168731689455, 0.030050304412841795, 0.029815807342529296, 0.029775871276855468, 0.02998784065246582, 0.029890560150146486, 0.02975334358215332, 0.029883392333984377, 0.02983526420593262, 0.029847551345825195, 0.02976972770690918]",tokens/s,33.09513111557333,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,M,M,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call 
last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/M/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2ebb-51ab3e7b6ce6696c18ca03ed;36274b79-a77e-434d-9d66-08cde9ea27b4) Repository Not Found for url: https://huggingface.co/M/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: M is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,openai-community/gpt2-large,openai-community/gpt2-large,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.1,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1263.47264,2645.03296,0.0,1998.585856,1692.285952,s,10,0.24304076957702642,0.02430407695770264,0.0005675228251535707,0.024276448249816895,0.024567701721191406,0.025167994689941405,0.025648229064941408,"[0.025768287658691408, 0.024286592483520508, 0.02426630401611328, 0.024318464279174806, 0.024434303283691405, 0.02419811248779297, 0.02437276840209961, 0.023690656661987306, 0.024211936950683594, 0.023493343353271485]",tokens/s,10533.212203266434,kWh,2.780947089195252e-07,1.5238005259914857e-07,8.187235307952231e-07,1.2491982923138968e-06,tokens/kWh,204931436.0859474,MB,1263.767552,2645.03296,0.0,1998.585856,1740.091904,s,10,13.987102905273439,1.3987102905273439,0.014214042604732734,1.4048785400390624,1.412000280761719,1.4145778381347656,1.416639884033203,"[1.4171553955078124, 1.4012147216796875, 1.405033935546875, 1.411427490234375, 1.4068282470703124, 1.40472314453125, 1.405282958984375, 1.3729639892578125, 1.3760699462890624, 
1.386403076171875]",tokens/s,45.04149317171867,kWh,1.62772132953008e-05,8.91975746257787e-06,3.22519140368052e-05,5.7448884794683886e-05,tokens/kWh,1096627.0315804249,,s,629,14.179969017028807,0.022543670933273148,0.0029405698135304874,0.02225868797302246,0.02251775932312012,0.02303631362915039,0.046451998291015664,"[0.023488512039184572, 0.023197696685791015, 0.02246143913269043, 0.02229555130004883, 0.022401023864746093, 0.02228326416015625, 0.02222489547729492, 0.02231091117858887, 0.0225218563079834, 0.023541759490966797, 0.023616512298583983, 0.02328985595703125, 0.023191551208496093, 0.023250944137573244, 0.023595008850097656, 0.023343103408813477, 0.023159807205200195, 0.02307788848876953, 0.023666688919067383, 0.023583744049072267, 0.023846912384033202, 0.022800384521484376, 0.02242355155944824, 0.022296575546264647, 0.02229043197631836, 0.022129663467407225, 0.022167552947998048, 0.022289407730102538, 0.022231039047241212, 0.022359039306640623, 0.022163455963134765, 0.022146047592163084, 0.022198272705078126, 0.022213632583618165, 0.022329343795776366, 0.021812223434448243, 0.021354496002197267, 0.02143436813354492, 0.02147225570678711, 0.021506048202514647, 0.021397504806518555, 0.02148659133911133, 0.02149478340148926, 0.021702655792236326, 0.023331840515136718, 0.022391807556152343, 0.022338560104370117, 0.022211584091186523, 0.022155263900756835, 0.022253568649291993, 0.02229145622253418, 0.022244352340698242, 0.022239231109619142, 0.02221670341491699, 0.022311935424804686, 0.022478847503662108, 0.022331392288208008, 0.02230681610107422, 0.022419456481933595, 0.02239897537231445, 0.02230169677734375, 0.0223242244720459, 0.04737433624267578, 0.022154239654541014, 0.02226483154296875, 0.02230271911621094, 0.02144972801208496, 0.02142617607116699, 0.021544960021972655, 0.021599231719970705, 0.02222489547729492, 0.022396928787231447, 0.022296575546264647, 0.022246400833129884, 0.022206464767456056, 0.022237184524536133, 0.022380544662475587, 0.022543359756469726, 0.022381568908691408, 0.022289407730102538, 0.022167552947998048, 0.02228428840637207, 0.022130687713623046, 0.022371328353881836, 0.02229145622253418, 0.022179840087890625, 0.022157312393188477, 0.022328319549560546, 0.02233344078063965, 0.02234982490539551, 0.022245376586914063, 0.022261760711669923, 0.02227712059020996, 0.02226483154296875, 0.02225868797302246, 0.0223191032409668, 0.022218751907348632, 0.022326271057128907, 0.022303743362426756, 0.02248806381225586, 0.022181888580322266, 0.022226943969726562, 0.02222591972351074, 0.022255615234375, 0.02224844741821289, 0.022112255096435548, 0.022326271057128907, 0.022156288146972656, 0.022769664764404295, 0.022261760711669923, 0.02227507209777832, 0.0222740478515625, 0.022338560104370117, 0.02231500816345215, 0.02227609634399414, 0.022260736465454102, 0.02228326416015625, 0.02230784034729004, 0.022261760711669923, 0.022268928527832032, 0.02252390480041504, 0.02224947166442871, 0.02229555130004883, 0.0222423038482666, 0.02222591972351074, 0.04691865539550781, 0.022223871231079103, 0.022368255615234374, 0.02228121566772461, 0.02227712059020996, 0.02224844741821289, 0.022231039047241212, 0.022236160278320313, 0.02225049591064453, 0.022305791854858398, 0.022199296951293947, 0.022161407470703123, 0.02229350471496582, 0.02231603240966797, 0.022381568908691408, 0.022329343795776366, 0.022428672790527345, 0.022311935424804686, 0.022203392028808593, 0.02231705665588379, 0.022589439392089843, 0.02243891143798828, 0.022245376586914063, 0.02247372817993164, 0.022378496170043945, 
0.02221670341491699, 0.022323200225830078, 0.02222489547729492, 0.022269952774047853, 0.022195199966430663, 0.022361087799072265, 0.0221942081451416, 0.022280160903930663, 0.022350847244262697, 0.022339584350585938, 0.02242355155944824, 0.022280191421508787, 0.022199296951293947, 0.022214656829833986, 0.022261760711669923, 0.022237184524536133, 0.0222423038482666, 0.022214656829833986, 0.022581247329711913, 0.022334463119506837, 0.022419456481933595, 0.022260736465454102, 0.022581247329711913, 0.0222423038482666, 0.02211020851135254, 0.022381568908691408, 0.022228992462158204, 0.02225868797302246, 0.02223411178588867, 0.022211584091186523, 0.022369279861450195, 0.02228121566772461, 0.02229452705383301, 0.022181888580322266, 0.022215679168701173, 0.022194175720214843, 0.022372352600097657, 0.022351871490478514, 0.04692172622680664, 0.02230988883972168, 0.02225868797302246, 0.02221772766113281, 0.022237184524536133, 0.02212761688232422, 0.02230169677734375, 0.022252544403076172, 0.02224127960205078, 0.02227302360534668, 0.022236160278320313, 0.022244352340698242, 0.022401023864746093, 0.022337535858154296, 0.022404096603393556, 0.02231808090209961, 0.022379520416259766, 0.022289407730102538, 0.023015424728393553, 0.022839296340942384, 0.023625728607177734, 0.02254643249511719, 0.022501375198364256, 0.022212608337402344, 0.02230886459350586, 0.022192127227783204, 0.02186751937866211, 0.02225868797302246, 0.022220800399780274, 0.022244352340698242, 0.02232729530334473, 0.022322175979614257, 0.022200319290161134, 0.02231603240966797, 0.022213632583618165, 0.022260736465454102, 0.02225971221923828, 0.022184959411621095, 0.022322175979614257, 0.022223871231079103, 0.02229350471496582, 0.022244352340698242, 0.02253824043273926, 0.02221772766113281, 0.022656000137329102, 0.022313983917236328, 0.02267238426208496, 0.022330400466918945, 0.022226911544799804, 0.02228326416015625, 0.026193920135498046, 0.02267852783203125, 0.022219776153564453, 0.022269952774047853, 0.022245376586914063, 0.022183935165405275, 0.022200319290161134, 0.022311935424804686, 0.022221824645996095, 0.022288383483886717, 0.02233241653442383, 0.02232524871826172, 0.02229452705383301, 0.04700876617431641, 0.022387712478637696, 0.022311935424804686, 0.022374399185180666, 0.02227712059020996, 0.022320127487182616, 0.022426624298095704, 0.02230271911621094, 0.0225167350769043, 0.022564863204956053, 0.02248806381225586, 0.022361087799072265, 0.022419456481933595, 0.022410240173339844, 0.022541311264038084, 0.02245737648010254, 0.022437856674194335, 0.02248192024230957, 0.02223411178588867, 0.022358015060424806, 0.02230886459350586, 0.022378496170043945, 0.02230784034729004, 0.022552576065063477, 0.022374399185180666, 0.022311935424804686, 0.02235699272155762, 0.022362112045288086, 0.02224742317199707, 0.02226790428161621, 0.022237184524536133, 0.022477823257446287, 0.022300703048706055, 0.022344671249389648, 0.02226688003540039, 0.022345727920532226, 0.022303743362426756, 0.022238208770751954, 0.022254592895507814, 0.022183935165405275, 0.02230169677734375, 0.022226943969726562, 0.02231603240966797, 0.022246400833129884, 0.02222591972351074, 0.022192127227783204, 0.022269952774047853, 0.02223411178588867, 0.022280191421508787, 0.02234060859680176, 0.02229555130004883, 0.02225766372680664, 0.02227507209777832, 0.022221824645996095, 0.022200319290161134, 0.022208511352539064, 0.02225049591064453, 0.022527999877929687, 0.02226688003540039, 0.022324256896972657, 0.022201311111450194, 0.02225868797302246, 0.02229862403869629, 
0.04681727981567383, 0.0222873592376709, 0.022252544403076172, 0.02227609634399414, 0.02228531265258789, 0.022226943969726562, 0.022236160278320313, 0.02235699272155762, 0.022329343795776366, 0.022527999877929687, 0.022365184783935548, 0.022367231369018553, 0.02240716743469238, 0.022360063552856444, 0.022312959671020507, 0.022500352859497072, 0.02227712059020996, 0.02225663948059082, 0.022190080642700196, 0.02223411178588867, 0.02221670341491699, 0.022297599792480468, 0.022380544662475587, 0.02223411178588867, 0.0222740478515625, 0.022218751907348632, 0.02250547218322754, 0.022362112045288086, 0.022189056396484375, 0.022197248458862305, 0.022175743103027345, 0.0222423038482666, 0.02221670341491699, 0.02226585578918457, 0.022161407470703123, 0.022182912826538087, 0.022428672790527345, 0.022236160278320313, 0.02267344093322754, 0.022254560470581056, 0.022212608337402344, 0.022169599533081053, 0.022261760711669923, 0.022228992462158204, 0.022177791595458983, 0.022404096603393556, 0.022181888580322266, 0.022383615493774413, 0.02244915199279785, 0.022458368301391602, 0.022296575546264647, 0.022377471923828125, 0.022180864334106445, 0.02232729530334473, 0.02223308753967285, 0.0223191032409668, 0.022232063293457033, 0.022355968475341798, 0.022238208770751954, 0.022271999359130858, 0.02221670341491699, 0.022223871231079103, 0.022175743103027345, 0.04688896179199219, 0.022303743362426756, 0.022354944229125977, 0.022297599792480468, 0.022215679168701173, 0.022192127227783204, 0.022171648025512695, 0.02224844741821289, 0.02211327934265137, 0.02228531265258789, 0.023112703323364257, 0.023995391845703123, 0.02248089599609375, 0.02227609634399414, 0.02220134353637695, 0.022296575546264647, 0.022337535858154296, 0.02222489547729492, 0.022120447158813478, 0.022183935165405275, 0.02221670341491699, 0.022187007904052734, 0.022220800399780274, 0.022237184524536133, 0.02226790428161621, 0.022206464767456056, 0.02225971221923828, 0.022140928268432617, 0.02229452705383301, 0.02228531265258789, 0.02229350471496582, 0.022176767349243166, 0.022361087799072265, 0.02225152015686035, 0.022210559844970702, 0.022215679168701173, 0.02226380729675293, 0.022187007904052734, 0.022213632583618165, 0.02224332809448242, 0.022754304885864256, 0.02226585578918457, 0.02221772766113281, 0.022206464767456056, 0.02226483154296875, 0.021966848373413086, 0.02205183982849121, 0.022099967956542968, 0.022203392028808593, 0.022363136291503907, 0.022364160537719727, 0.022344703674316405, 0.02223308753967285, 0.022335487365722655, 0.0224399356842041, 0.022565887451171874, 0.0225218563079834, 0.022244352340698242, 0.02223411178588867, 0.02225663948059082, 0.022342655181884767, 0.02221878433227539, 0.02215318489074707, 0.04551270294189453, 0.021498880386352538, 0.021541887283325196, 0.021501951217651367, 0.021432319641113282, 0.021425151824951173, 0.02147327995300293, 0.02146406364440918, 0.021530624389648437, 0.021445632934570313, 0.021526527404785157, 0.021679103851318358, 0.02146611213684082, 0.021538816452026367, 0.0215285758972168, 0.021593088150024413, 0.021651456832885742, 0.02127257537841797, 0.021340160369873046, 0.021440511703491212, 0.021550079345703126, 0.02147737693786621, 0.021549055099487305, 0.02142207908630371, 0.02148249626159668, 0.021562368392944335, 0.021440511703491212, 0.02149171257019043, 0.021424127578735352, 0.021378047943115236, 0.021389312744140625, 0.021445632934570313, 0.02142720031738281, 0.02143948745727539, 0.021332992553710937, 0.021432319641113282, 0.021433343887329103, 0.02142617607116699, 
0.02143129539489746, 0.02147327995300293, 0.02142617607116699, 0.021556224822998047, 0.021329919815063478, 0.021178367614746094, 0.02149478340148926, 0.02295091247558594, 0.024001535415649415, 0.02289151954650879, 0.022328319549560546, 0.02247270393371582, 0.022336511611938475, 0.022393856048583984, 0.022271999359130858, 0.02305023956298828, 0.0224399356842041, 0.02233241653442383, 0.022658048629760744, 0.022633472442626954, 0.022371360778808594, 0.022270944595336913, 0.02266726493835449, 0.022451200485229493, 0.02231091117858887, 0.04704051208496094, 0.022401023864746093, 0.022451200485229493, 0.022221824645996095, 0.022182912826538087, 0.023061504364013673, 0.022665216445922853, 0.022202367782592772, 0.02227302360534668, 0.022352895736694335, 0.022350847244262697, 0.022240255355834963, 0.02226585578918457, 0.022435840606689454, 0.022339584350585938, 0.023191551208496093, 0.022448160171508788, 0.022245344161987306, 0.022419456481933595, 0.02229555130004883, 0.022278144836425783, 0.022253568649291993, 0.022322175979614257, 0.02143539237976074, 0.021354496002197267, 0.021412864685058593, 0.021437440872192383, 0.021246976852416992, 0.021206016540527343, 0.021312511444091797, 0.02145996856689453, 0.021396480560302734, 0.02141900825500488, 0.021416959762573243, 0.021406719207763672, 0.02141798400878906, 0.021346303939819337, 0.021408767700195314, 0.02161664009094238, 0.02151219177246094, 0.021358591079711914, 0.021411840438842773, 0.021358591079711914, 0.021420032501220702, 0.02142416000366211, 0.021448671340942384, 0.021368831634521485, 0.02146611213684082, 0.021394432067871092, 0.02146099281311035, 0.021359615325927735, 0.021428224563598632, 0.021392383575439454, 0.021569536209106444, 0.02145996856689453, 0.02147020721435547, 0.0214835205078125, 0.021580799102783203, 0.02149478340148926, 0.02230169677734375, 0.0222873592376709, 0.022323200225830078, 0.02229964828491211, 0.045434879302978515, 0.02152448081970215, 0.02148044776916504, 0.021498880386352538, 0.021572608947753907, 0.021533695220947266, 0.021521408081054686, 0.02148454475402832, 0.02149990463256836, 0.02147737693786621, 0.021549055099487305, 0.02145484733581543, 0.02142207908630371, 0.02142310333251953, 0.021547008514404296, 0.021576704025268553, 0.021511167526245118, 0.021346303939819337, 0.021358591079711914, 0.02142720031738281, 0.021970943450927736, 0.02230169677734375, 0.022139904022216796, 0.022558719635009765, 0.022374399185180666, 0.022157312393188477, 0.022334463119506837, 0.022297599792480468, 0.02227712059020996, 0.022139904022216796, 0.02226585578918457, 0.022194175720214843, 0.022185983657836913, 0.022076416015625, 0.022228992462158204, 0.02224844741821289, 0.02190336036682129, 0.022204416275024414, 0.022226943969726562, 0.022199296951293947, 0.022120447158813478, 0.02222591972351074, 0.02211327934265137, 0.02224745559692383, 0.022212575912475586, 0.022345727920532226, 0.022420480728149415, 0.022236160278320313, 0.022222848892211915, 0.02214297676086426, 0.02222591972351074, 0.022152191162109376, 0.02224127960205078, 0.022139904022216796, 0.022162431716918944, 0.022288383483886717, 0.022170623779296874, 0.02231705665588379, 0.022303743362426756, 0.022345727920532226, 0.0222423038482666, 0.022157312393188477, 0.02229964828491211]",tokens/s,44.35834797978967,,,,, 
4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-70m,EleutherAI/pythia-70m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1298.415616,872.93952,0.0,226.492416,184.397824,s,11,0.17165715312957763,0.015605195739052512,0.0007607833862353127,0.01534540843963623,0.01575004768371582,0.016863200187683107,0.017753722190856934,"[0.01797635269165039, 0.015421407699584961, 0.015347392082214355, 0.015315903663635253, 0.015256128311157226, 0.01575004768371582, 0.01534540843963623, 0.015287008285522461, 0.015340383529663087, 0.01527171230316162, 0.01534540843963623]",tokens/s,16404.792626814135,kWh,1.8500959462776245e-07,1.0137606156642736e-07,3.4862617005003736e-07,6.350118262442272e-07,tokens/kWh,403142098.1780924,MB,1298.743296,872.93952,0.0,226.492416,197.933568,s,11,10.171151123046876,0.9246501020951705,0.010660233315434522,0.920117919921875,0.9293056640625,0.9430220336914062,0.9539951293945312,"[0.9567384033203125, 0.9293056640625, 0.9245167846679687, 0.9185679931640625, 0.920117919921875, 0.9228417358398437, 0.919580810546875, 0.9193172607421874, 0.9187877807617187, 0.923688232421875, 0.9176885375976562]",tokens/s,68.13388097535262,kWh,1.1024352834572037e-05,6.040874968975529e-06,1.993414258140665e-05,3.6999370384954216e-05,tokens/kWh,1702731.6774454878,,s,692,10.307773435592651,0.014895626352012501,0.0018980452913926306,0.014577664375305176,0.01503354845046997,0.015412480163574216,0.030279741020202637,"[0.016365568161010743, 0.016268287658691406, 0.01589350414276123, 0.01510707187652588, 0.015166463851928711, 0.015711232185363768, 0.015400959968566894, 0.015467519760131837, 0.015654911994934084, 0.015433728218078613, 0.015542304039001465, 0.015136799812316894, 0.015172543525695801, 0.015097855567932129, 0.015466496467590332, 0.016034816741943358, 0.015383551597595215, 0.015619071960449218, 0.015451135635375977, 0.015178751945495606, 0.015625215530395507, 0.015426560401916504, 0.015222847938537598, 0.015156160354614258, 0.015565823554992676, 0.016327680587768553, 0.015833087921142578, 0.01530675220489502, 0.015086591720581055, 0.01507737636566162, 0.01501088047027588, 0.01481721591949463, 0.015222784042358398, 0.015755264282226563, 0.016021503448486327, 0.015339520454406739, 0.015148032188415527, 0.01509171199798584, 0.015104000091552734, 0.015050751686096191, 0.015105024337768554, 0.014737407684326171, 0.014621696472167968, 0.014722047805786133, 0.014584896087646484, 0.014627776145935058, 0.014577664375305176, 0.014598143577575684, 0.014589952468872071, 0.014524415969848633, 0.014585856437683106, 0.01456332778930664, 0.014573568344116212, 0.014537728309631348, 0.014552063941955566, 0.014576640129089356, 0.014576640129089356, 0.014548992156982422, 0.01457049560546875, 0.014616576194763184, 0.014621696472167968, 0.014593024253845215, 0.030418943405151368, 0.014628864288330079, 0.014632960319519044, 0.0145797119140625, 0.01460531234741211, 0.014616576194763184, 0.01458073616027832, 0.014573568344116212, 0.014584832191467285, 0.014578687667846679, 
0.014615551948547363, 0.014621696472167968, 0.01458790397644043, 0.014673919677734374, 0.014622719764709472, 0.01457151985168457, 0.014611455917358398, 0.0145797119140625, 0.014619647979736328, 0.014697471618652343, 0.014582783699035644, 0.014603263854980468, 0.01460223960876465, 0.01459404754638672, 0.014568448066711426, 0.014610431671142577, 0.014583807945251465, 0.014577664375305176, 0.014595071792602539, 0.014558208465576173, 0.01457254409790039, 0.014645248413085938, 0.014598143577575684, 0.015670271873474122, 0.015376383781433106, 0.015114239692687988, 0.015088640213012695, 0.015019007682800293, 0.015113216400146484, 0.015021056175231933, 0.014995455741882324, 0.014962688446044922, 0.014998527526855468, 0.014962688446044922, 0.014985216140747071, 0.014946304321289062, 0.01500876808166504, 0.014965760231018066, 0.015024191856384277, 0.014966719627380371, 0.014658559799194336, 0.014590975761413574, 0.014565376281738282, 0.01461350440979004, 0.014582783699035644, 0.015023103713989258, 0.01509887981414795, 0.014726143836975097, 0.014541824340820313, 0.0145797119140625, 0.014606335639953612, 0.014665727615356445, 0.01459609603881836, 0.030462976455688476, 0.014574591636657714, 0.014746623992919922, 0.015026176452636719, 0.015056960105895996, 0.015804351806640624, 0.015242239952087403, 0.015034367561340332, 0.015006719589233398, 0.014931967735290527, 0.01477734375, 0.0145797119140625, 0.014584832191467285, 0.014592000007629394, 0.014552063941955566, 0.014543871879577636, 0.014574591636657714, 0.014581760406494141, 0.014575615882873535, 0.014657535552978516, 0.014608384132385254, 0.014558208465576173, 0.014607359886169433, 0.014606335639953612, 0.01459609603881836, 0.014619647979736328, 0.014576640129089356, 0.014553088188171387, 0.014573568344116212, 0.014575615882873535, 0.014550016403198243, 0.01458892822265625, 0.014589952468872071, 0.014590975761413574, 0.014647295951843262, 0.01455513572692871, 0.014576640129089356, 0.014583807945251465, 0.014652416229248047, 0.01460223960876465, 0.014617600440979005, 0.014598143577575684, 0.0145797119140625, 0.01457254409790039, 0.014576640129089356, 0.014589952468872071, 0.01458790397644043, 0.014652416229248047, 0.014550016403198243, 0.01457049560546875, 0.01458892822265625, 0.014541824340820313, 0.0145797119140625, 0.0145633602142334, 0.014960607528686524, 0.014947327613830566, 0.014850111961364746, 0.014465984344482421, 0.014386176109313965, 0.014425087928771972, 0.014609408378601075, 0.014697471618652343, 0.014664704322814942, 0.030324735641479493, 0.014525440216064453, 0.014568448066711426, 0.014598143577575684, 0.01457049560546875, 0.014592000007629394, 0.014541824340820313, 0.014660639762878418, 0.014574560165405273, 0.014515199661254884, 0.014586879730224609, 0.014577664375305176, 0.01461350440979004, 0.014574591636657714, 0.01455513572692871, 0.014520319938659668, 0.014518272399902344, 0.01457151985168457, 0.014554112434387208, 0.01456332778930664, 0.014540800094604492, 0.01457049560546875, 0.01456332778930664, 0.014504960060119629, 0.014553088188171387, 0.014619647979736328, 0.014583807945251465, 0.014557184219360352, 0.014566399574279786, 0.014529536247253418, 0.014600192070007324, 0.014612480163574219, 0.01456332778930664, 0.014575615882873535, 0.014627840042114258, 0.01467084789276123, 0.014575615882873535, 0.014560256004333496, 0.014663680076599121, 0.014564352035522461, 0.014524415969848633, 0.01454694366455078, 0.01458073616027832, 0.014548992156982422, 0.01452444839477539, 0.014602208137512207, 0.01455513572692871, 
0.01456332778930664, 0.014532608032226562, 0.01461350440979004, 0.014622719764709472, 0.014517248153686524, 0.01459404754638672, 0.014558208465576173, 0.014556159973144531, 0.014548992156982422, 0.01457049560546875, 0.014508031845092773, 0.014590975761413574, 0.014540800094604492, 0.014600192070007324, 0.01469644832611084, 0.014692352294921876, 0.030276607513427735, 0.01461350440979004, 0.014553088188171387, 0.014508031845092773, 0.014564352035522461, 0.014553088188171387, 0.014565376281738282, 0.014566399574279786, 0.014537728309631348, 0.014530559539794922, 0.01459609603881836, 0.014569472312927247, 0.014567423820495605, 0.014508031845092773, 0.01455513572692871, 0.014628864288330079, 0.014535679817199706, 0.01458790397644043, 0.014557184219360352, 0.014535712242126466, 0.014557151794433594, 0.014535679817199706, 0.014534655570983887, 0.014531583786010742, 0.014545920372009278, 0.014566399574279786, 0.014538751602172852, 0.014553088188171387, 0.01456332778930664, 0.014638079643249511, 0.014538751602172852, 0.014615551948547363, 0.014623744010925293, 0.014533663749694824, 0.014525407791137696, 0.014531583786010742, 0.0145797119140625, 0.01457049560546875, 0.014547967910766601, 0.014557184219360352, 0.014577664375305176, 0.014550016403198243, 0.014560256004333496, 0.014547967910766601, 0.01447935962677002, 0.014482432365417481, 0.014364671707153321, 0.014386176109313965, 0.014722047805786133, 0.015221759796142579, 0.01508351993560791, 0.015017984390258789, 0.014990336418151855, 0.014946304321289062, 0.014996479988098145, 0.014521344184875488, 0.014540800094604492, 0.014592000007629394, 0.014558208465576173, 0.014535679817199706, 0.01457049560546875, 0.014556159973144531, 0.014526464462280274, 0.031153152465820313, 0.014898176193237305, 0.01458790397644043, 0.014568448066711426, 0.014693375587463378, 0.014625791549682618, 0.015431679725646973, 0.014708736419677734, 0.014601216316223145, 0.01455513572692871, 0.014576640129089356, 0.014567423820495605, 0.014589952468872071, 0.014566399574279786, 0.014551039695739745, 0.01480191993713379, 0.014784511566162109, 0.015605759620666505, 0.015138815879821778, 0.014937088012695313, 0.014920703887939453, 0.014586879730224609, 0.014550047874450684, 0.014574624061584473, 0.01459712028503418, 0.014562239646911621, 0.014526464462280274, 0.014525440216064453, 0.014536704063415527, 0.014520319938659668, 0.014515199661254884, 0.014625791549682618, 0.014533632278442383, 0.014556159973144531, 0.014573568344116212, 0.014589952468872071, 0.01458073616027832, 0.014565376281738282, 0.014610431671142577, 0.01459609603881836, 0.01466982364654541, 0.014590975761413574, 0.014558208465576173, 0.014566399574279786, 0.014538751602172852, 0.014564352035522461, 0.014567423820495605, 0.014530559539794922, 0.014538751602172852, 0.014535679817199706, 0.014551039695739745, 0.01457151985168457, 0.014675968170166016, 0.014567423820495605, 0.014585856437683106, 0.014559231758117675, 0.014527487754821777, 0.014577664375305176, 0.014561280250549317, 0.014562303543090821, 0.014578687667846679, 0.014515199661254884, 0.01454086399078369, 0.03033900833129883, 0.014519295692443847, 0.014643199920654297, 0.014578687667846679, 0.014610431671142577, 0.014585856437683106, 0.014574591636657714, 0.014586879730224609, 0.01457254409790039, 0.014628864288330079, 0.01458790397644043, 0.01529856014251709, 0.014918656349182128, 0.014584832191467285, 0.014532608032226562, 0.014585920333862305, 0.014574527740478516, 0.014573568344116212, 0.014543871879577636, 0.014592063903808595, 
0.01458886432647705, 0.014574591636657714, 0.01458892822265625, 0.014633983612060546, 0.014540800094604492, 0.014567423820495605, 0.014564352035522461, 0.014526464462280274, 0.014562303543090821, 0.01458073616027832, 0.014523391723632812, 0.014614527702331542, 0.014553088188171387, 0.014541824340820313, 0.014567423820495605, 0.014635007858276367, 0.014566399574279786, 0.01459404754638672, 0.014535679817199706, 0.014558208465576173, 0.014565376281738282, 0.01458073616027832, 0.014585856437683106, 0.014607359886169433, 0.014540800094604492, 0.014544896125793457, 0.014617600440979005, 0.014566399574279786, 0.01457254409790039, 0.014559231758117675, 0.014561280250549317, 0.01458073616027832, 0.014527551651000977, 0.014563263893127442, 0.014543871879577636, 0.014548992156982422, 0.014582783699035644, 0.014598143577575684, 0.014575615882873535, 0.014552063941955566, 0.014589952468872071, 0.01457254409790039, 0.014573568344116212, 0.030249984741210937, 0.014523391723632812, 0.014586943626403809, 0.014542783737182617, 0.014595104217529296, 0.014566368103027343, 0.014517248153686524, 0.014551039695739745, 0.014527487754821777, 0.014636032104492188, 0.014616576194763184, 0.014551072120666505, 0.014565343856811524, 0.014598143577575684, 0.0146626558303833, 0.014781439781188965, 0.015035391807556153, 0.014568448066711426, 0.01457254409790039, 0.014618623733520507, 0.014560256004333496, 0.014574591636657714, 0.014547967910766601, 0.014540800094604492, 0.014562303543090821, 0.014603263854980468, 0.01456332778930664, 0.014577664375305176, 0.014547967910766601, 0.01459404754638672, 0.01458790397644043, 0.014542847633361817, 0.014595071792602539, 0.01458790397644043, 0.014665792465209961, 0.014555071830749512, 0.014537728309631348, 0.014530559539794922, 0.014581760406494141, 0.014584832191467285, 0.014564352035522461, 0.014573568344116212, 0.014548992156982422, 0.014700544357299805, 0.014585856437683106, 0.014574591636657714, 0.014560256004333496, 0.014617600440979005, 0.014556159973144531, 0.014578687667846679, 0.01455513572692871, 0.01466982364654541, 0.014557184219360352, 0.014561280250549317, 0.014525440216064453, 0.014565376281738282, 0.014586879730224609, 0.0145664644241333, 0.014582719802856445, 0.014539775848388671, 0.014560256004333496, 0.01460428810119629, 0.014592000007629394, 0.030311424255371092, 0.014531583786010742, 0.014569472312927247, 0.014636032104492188, 0.014564352035522461, 0.014538751602172852, 0.01458892822265625, 0.014531583786010742, 0.014553088188171387, 0.014525440216064453, 0.014530624389648437, 0.014523327827453613, 0.014575615882873535, 0.014558208465576173, 0.014516223907470703, 0.014637056350708008, 0.014518272399902344, 0.014577664375305176, 0.01495961570739746, 0.014617600440979005, 0.014619711875915528, 0.014569408416748047, 0.014639103889465332, 0.014576640129089356, 0.014560256004333496, 0.014568448066711426, 0.0145797119140625, 0.014550016403198243, 0.014577664375305176, 0.014582783699035644, 0.014611488342285156, 0.014558176040649413, 0.014578687667846679, 0.014569472312927247, 0.014663680076599121, 0.014584832191467285, 0.014565376281738282, 0.01456332778930664, 0.014550016403198243, 0.014538751602172852, 0.014530559539794922, 0.014534655570983887, 0.014559231758117675, 0.01455513572692871, 0.014628864288330079, 0.014585856437683106, 0.014578687667846679, 0.014625791549682618, 0.014540800094604492, 0.014582783699035644, 0.014582783699035644, 0.014595071792602539, 0.014568448066711426, 0.014524415969848633, 0.014545920372009278, 0.014603263854980468, 
0.01457049560546875, 0.014561280250549317, 0.014623744010925293, 0.014557184219360352, 0.014583807945251465, 0.014567487716674805, 0.014582719802856445, 0.03019980812072754, 0.014551039695739745, 0.01457151985168457, 0.014541824340820313, 0.014585856437683106, 0.01539891242980957, 0.015851519584655763, 0.014816255569458007, 0.014586879730224609, 0.014638079643249511, 0.014639103889465332, 0.014531583786010742, 0.014663680076599121, 0.014520319938659668, 0.014543871879577636, 0.014617600440979005, 0.014495743751525878, 0.01458073616027832, 0.014540800094604492, 0.014554112434387208, 0.014567423820495605, 0.01455513572692871, 0.014504960060119629, 0.01479475212097168, 0.015308799743652344, 0.014611455917358398, 0.014626815795898437, 0.014567423820495605, 0.014535679817199706, 0.01460223960876465, 0.01458892822265625, 0.014566399574279786, 0.014566399574279786, 0.014557184219360352, 0.014724096298217774, 0.014551039695739745, 0.014535712242126466, 0.014597087860107423, 0.014573599815368652, 0.014573535919189452, 0.014564352035522461, 0.01457049560546875, 0.014740480422973632, 0.014905344009399414, 0.014684160232543946, 0.014547967910766601, 0.014768128395080566, 0.015746047973632812, 0.014673919677734374, 0.014562303543090821, 0.014558208465576173, 0.014558208465576173, 0.01455513572692871, 0.014536704063415527, 0.01458790397644043, 0.014529536247253418, 0.014589952468872071, 0.014554112434387208, 0.014553088188171387, 0.014586879730224609, 0.014547967910766601, 0.014573599815368652, 0.014557151794433594, 0.03037593650817871, 0.014526464462280274, 0.014798848152160645, 0.014523391723632812, 0.01459404754638672, 0.014567423820495605, 0.014531583786010742, 0.014575615882873535, 0.014536704063415527, 0.014520319938659668, 0.014532608032226562, 0.014419967651367188, 0.014387200355529785, 0.01439027214050293, 0.014443519592285157, 0.014552063941955566, 0.014498815536499024, 0.014529536247253418, 0.014651391983032227, 0.014534655570983887, 0.014548992156982422, 0.014628864288330079, 0.014568448066711426, 0.01458790397644043, 0.01457151985168457, 0.014544896125793457, 0.014608384132385254, 0.014728192329406739, 0.014638079643249511, 0.014592000007629394, 0.014573568344116212, 0.014636032104492188, 0.014647295951843262, 0.014457856178283691, 0.014402560234069824, 0.014632960319519044, 0.014524415969848633, 0.01457049560546875, 0.01459712028503418, 0.014574591636657714, 0.014561280250549317, 0.01458790397644043, 0.014598143577575684, 0.014618623733520507, 0.014533632278442383, 0.014523391723632812, 0.014573568344116212, 0.01456332778930664, 0.01457151985168457, 0.01458073616027832, 0.014542847633361817, 0.014547967910766601, 0.014568448066711426, 0.014557184219360352, 0.014553088188171387, 0.014541824340820313, 0.014518272399902344, 0.014573568344116212, 0.014523455619812012, 0.014740415573120117, 0.014707776069641113, 0.014453696250915528, 0.014403583526611329]",tokens/s,67.13379997376835,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm-20b,internlm/internlm-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA 
A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,4294.987776,14621.868032,0.0,13975.420928,13366.068224,s,10,16.495098876953126,1.6495098876953125,0.0005626635207808348,1.6493680419921875,1.6498059448242186,1.6504339782714843,1.650936405029297,"[1.65106201171875, 1.6493150634765625, 1.6490108642578125, 1.64951513671875, 1.6496663818359374, 1.6489195556640626, 1.649384521484375, 1.6492862548828124, 1.6495875244140625, 1.6493515625]",tokens/s,155.19761470340868,kWh,1.9482200029823513e-05,1.067632958434842e-05,9.151657321320173e-05,0.00012167510282737365,tokens/kWh,2103963.7037595077,MB,4294.987776,14621.868032,0.0,13975.420928,13814.414848,s,10,977.7317578124998,97.77317578125,0.006501026483324052,97.774078125,97.7795859375,97.78208203125,97.78407890625,"[97.7740859375, 97.76746875, 97.7754296875, 97.784578125, 97.7782421875, 97.7740703125, 97.77903125, 97.76209375, 97.764875, 97.7718828125]",tokens/s,0.6443485086436308,kWh,0.0011542207027475041,0.0006326151578875942,0.005356944729996799,0.007143780590631898,tokens/kWh,8818.859874086276,,s,629,991.0235808105483,1.5755541825286914,0.19600214423351972,1.55186181640625,1.5529105224609374,1.5533291259765625,3.2017358984375,"[1.5526031494140624, 1.5511746826171875, 1.5512442626953125, 1.5510794677734374, 1.55169482421875, 1.5518966064453126, 1.5526973876953125, 1.5532349853515626, 1.55080810546875, 1.55123095703125, 1.5509954833984374, 1.5513375244140626, 1.551034423828125, 1.552995361328125, 1.5513057861328126, 1.55249462890625, 1.5510445556640624, 1.551331298828125, 1.5506575927734374, 1.55245263671875, 1.5513251953125, 1.5512381591796875, 1.551388671875, 1.5524095458984375, 1.5524761962890625, 1.5533260498046875, 1.553095703125, 1.5521700439453125, 1.551240234375, 1.551688720703125, 1.5509698486328125, 1.55129443359375, 1.5527669677734375, 1.5515802001953125, 1.551595458984375, 1.5514736328125, 1.552204833984375, 1.5519498291015625, 1.55194677734375, 1.55318994140625, 1.55307421875, 1.5523338623046874, 1.5521546630859375, 1.55177978515625, 1.5524197998046876, 1.5533404541015625, 1.5529237060546874, 1.5524351806640626, 1.55150439453125, 1.55194775390625, 1.552548828125, 1.5523931884765625, 1.55225390625, 1.5514603271484375, 1.5512484130859374, 1.5517244873046876, 1.5518822021484375, 1.552468017578125, 1.55346630859375, 1.5518555908203124, 1.5518289794921876, 1.5518668212890625, 3.202376708984375, 1.55222216796875, 1.5525550537109376, 1.551751220703125, 1.5510302734375, 1.55141943359375, 1.5515596923828125, 1.5518863525390625, 1.5530260009765624, 1.5516702880859374, 1.5516968994140625, 1.55188330078125, 1.55222119140625, 1.5524822998046874, 1.5535472412109375, 1.552794677734375, 1.550878662109375, 1.551520751953125, 1.55161083984375, 1.5515064697265626, 1.5530260009765624, 1.5537049560546874, 1.5521280517578124, 1.5518392333984374, 1.5516507568359375, 1.5517174072265625, 1.5512647705078124, 1.5524556884765626, 1.5519549560546875, 1.552384033203125, 1.5521689453125, 1.5524669189453124, 1.5522928466796875, 1.5529041748046875, 1.552691162109375, 1.55226318359375, 1.552175048828125, 1.5517655029296875, 1.5517972412109375, 1.55106201171875, 1.5525765380859375, 1.551730712890625, 1.5510968017578124, 1.550551025390625, 1.5507015380859375, 1.5512001953125, 1.5515802001953125, 1.5522396240234375, 1.5507763671875, 1.5507752685546874, 1.551515625, 1.551489990234375, 1.5510538330078125, 1.552689208984375, 1.5505909423828126, 1.55152587890625, 1.551647705078125, 1.5511357421875, 1.5508541259765625, 1.5514869384765626, 
1.5528365478515624, 1.5521771240234374, 1.5512657470703124, 3.201815673828125, 1.5507783203125, 1.5535196533203126, 1.55132421875, 1.551177734375, 1.55173681640625, 1.5521044921875, 1.5513375244140626, 1.5526204833984374, 1.5516661376953125, 1.5514337158203124, 1.551310791015625, 1.5512391357421875, 1.5521761474609375, 1.5526307373046875, 1.553349609375, 1.551321044921875, 1.5509493408203125, 1.5514971923828125, 1.5512432861328125, 1.551467529296875, 1.552627685546875, 1.552321533203125, 1.5515975341796875, 1.5512330322265624, 1.55135595703125, 1.5512811279296874, 1.55175732421875, 1.5532247314453125, 1.5510487060546876, 1.55163232421875, 1.551236083984375, 1.55205224609375, 1.551705078125, 1.5527781982421875, 1.55230517578125, 1.5521064453125, 1.5526748046875, 1.551935546875, 1.5518392333984374, 1.553038330078125, 1.552990234375, 1.5518115234375, 1.551340576171875, 1.5512269287109375, 1.5518863525390625, 1.551740966796875, 1.553649658203125, 1.551879150390625, 1.5528212890625, 1.552669677734375, 1.5528980712890625, 1.5524495849609374, 1.5538206787109374, 1.55340185546875, 1.5514490966796874, 1.55186181640625, 1.5515064697265626, 1.5515709228515624, 1.5511982421875, 1.5528519287109375, 1.5516334228515625, 1.5513446044921875, 3.2030166015625, 1.55180029296875, 1.552606201171875, 1.5531837158203126, 1.551551513671875, 1.55184130859375, 1.5515115966796875, 1.551283203125, 1.551794189453125, 1.5518525390625, 1.5510743408203125, 1.551730712890625, 1.551551513671875, 1.5510927734375, 1.550951416015625, 1.5540325927734375, 1.55168359375, 1.5516273193359376, 1.5512708740234376, 1.5510076904296874, 1.55110302734375, 1.552822265625, 1.5531990966796876, 1.551572021484375, 1.552153564453125, 1.5514736328125, 1.5518802490234376, 1.551657958984375, 1.5528406982421874, 1.5522344970703126, 1.5521812744140624, 1.5515064697265626, 1.55281103515625, 1.55241064453125, 1.552889892578125, 1.5524730224609375, 1.5517911376953124, 1.5517276611328126, 1.5515074462890626, 1.550802978515625, 1.552126953125, 1.55321240234375, 1.5522139892578124, 1.5518084716796876, 1.5521444091796874, 1.5523614501953125, 1.55158837890625, 1.5536046142578126, 1.5520296630859376, 1.5530731201171875, 1.5527608642578126, 1.552880615234375, 1.5533311767578124, 1.5541422119140624, 1.55335888671875, 1.551869873046875, 1.55161083984375, 1.5517900390625, 1.5514500732421874, 1.551742919921875, 1.553448974609375, 1.551916015625, 1.552142333984375, 3.20272802734375, 1.5516005859375, 1.55409814453125, 1.552426025390625, 1.5515802001953125, 1.5510958251953124, 1.5515125732421875, 1.55104052734375, 1.5517767333984376, 1.5526820068359375, 1.5530455322265626, 1.5519539794921875, 1.5517962646484376, 1.552195556640625, 1.5515853271484374, 1.5515821533203125, 1.5523768310546875, 1.5513426513671875, 1.551593505859375, 1.5516016845703124, 1.5524136962890625, 1.551958984375, 1.5531632080078126, 1.5515289306640625, 1.55102099609375, 1.5516641845703125, 1.5517286376953126, 1.5518515625, 1.5519652099609376, 1.5515596923828125, 1.55163134765625, 1.5519027099609375, 1.5522764892578125, 1.5519969482421876, 1.5524884033203126, 1.5527935791015626, 1.5509124755859376, 1.55156884765625, 1.551499267578125, 1.5518935546875, 1.551709228515625, 1.552568359375, 1.552501708984375, 1.5518760986328124, 1.5515699462890624, 1.551488037109375, 1.5521546630859375, 1.5521934814453124, 1.5524454345703125, 1.553112060546875, 1.5525970458984375, 1.551457275390625, 1.551424560546875, 1.5510076904296874, 1.5529554443359375, 1.5528099365234376, 1.552541748046875, 1.5528201904296874, 
1.5534459228515625, 1.5517440185546876, 1.5520296630859376, 1.5527730712890624, 1.55129345703125, 3.201416259765625, 1.5519610595703126, 1.551810546875, 1.5517440185546876, 1.551605712890625, 1.5520235595703125, 1.5515657958984375, 1.552027587890625, 1.551488037109375, 1.5524935302734375, 1.5531314697265626, 1.5514276123046875, 1.5520655517578126, 1.5519825439453125, 1.551853515625, 1.5516712646484374, 1.5525252685546875, 1.551730712890625, 1.5519703369140625, 1.55103125, 1.55213720703125, 1.552764892578125, 1.553755126953125, 1.5528775634765626, 1.5516845703125, 1.5517542724609374, 1.5515330810546875, 1.5523287353515625, 1.552110595703125, 1.5518494873046875, 1.5517244873046876, 1.55184228515625, 1.551310791015625, 1.5519027099609375, 1.5507896728515624, 1.552606201171875, 1.5512637939453124, 1.5521474609375, 1.5516856689453125, 1.551458251953125, 1.5516610107421875, 1.552607177734375, 1.5517041015625, 1.5525919189453126, 1.55161083984375, 1.552396240234375, 1.550496826171875, 1.551563720703125, 1.5521248779296875, 1.5515289306640625, 1.5516282958984375, 1.5522191162109376, 1.5513795166015625, 1.551456298828125, 1.5522652587890624, 1.5525396728515626, 1.5528642578125, 1.5518760986328124, 1.5516190185546874, 1.5520552978515625, 1.5518668212890625, 1.5532369384765625, 1.552636962890625, 3.202765869140625, 1.5513466796875, 1.553580078125, 1.5520194091796875, 1.5519456787109376, 1.5510753173828125, 1.5518084716796876, 1.55133544921875, 1.5519498291015625, 1.5519241943359374, 1.5514061279296876, 1.551531005859375, 1.5519334716796875, 1.5514398193359376, 1.5528131103515626, 1.55154638671875, 1.55287451171875, 1.552290771484375, 1.5517265625, 1.550793701171875, 1.5515279541015625, 1.5525499267578124, 1.5527659912109375, 1.5533721923828125, 1.5519078369140624, 1.55173583984375, 1.5521668701171876, 1.5522508544921876, 1.5519713134765625, 1.5531519775390625, 1.5524608154296875, 1.551698974609375, 1.552278564453125, 1.55198974609375, 1.5515361328125, 1.55272705078125, 1.551920166015625, 1.552759765625, 1.5514500732421874, 1.5516590576171876, 1.551510498046875, 1.5519405517578124, 1.552752685546875, 1.55184130859375, 1.55072509765625, 1.5517808837890625, 1.5512965087890624, 1.552079833984375, 1.552720947265625, 1.5514542236328126, 1.553217529296875, 1.5522928466796875, 1.5522979736328124, 1.551899658203125, 1.551836181640625, 1.5530526123046875, 1.55264306640625, 1.552206787109375, 1.55278955078125, 1.5506513671875, 1.5514705810546876, 1.552828369140625, 1.5512606201171875, 3.20153076171875, 1.5501854248046876, 1.5515657958984375, 1.5514951171875, 1.552109619140625, 1.552099365234375, 1.552288818359375, 1.5523583984375, 1.5526461181640625, 1.5514920654296875, 1.5523031005859376, 1.5516446533203125, 1.55148291015625, 1.5517808837890625, 1.5515330810546875, 1.5520235595703125, 1.5527147216796875, 1.5511695556640626, 1.552215087890625, 1.5506851806640625, 1.5511387939453125, 1.5506053466796874, 1.55132421875, 1.5518238525390624, 1.551515625, 1.5510947265625, 1.5512073974609375, 1.5515657958984375, 1.5510599365234374, 1.5514132080078125, 1.551810546875, 1.55152490234375, 1.55114697265625, 1.5516077880859376, 1.5506268310546876, 1.5511910400390625, 1.5526318359375, 1.552217041015625, 1.551873046875, 1.5515443115234375, 1.551283203125, 1.551500244140625, 1.55306494140625, 1.5515596923828125, 1.5516978759765625, 1.5517235107421874, 1.551678466796875, 1.553006591796875, 1.5524617919921875, 1.5526624755859375, 1.5517911376953124, 1.5515576171875, 1.5515587158203126, 1.551556640625, 1.5509442138671874, 
1.5528365478515624, 1.5521904296875, 1.55300244140625, 1.5519825439453125, 1.5525980224609375, 1.551983642578125, 1.5516201171875, 1.5528509521484375, 3.202553955078125, 1.551009765625, 1.5523553466796876, 1.5506207275390624, 1.5519232177734374, 1.551784912109375, 1.551542236328125, 1.551203369140625, 1.5513343505859376, 1.55032373046875, 1.5522550048828125, 1.5514869384765626, 1.551784912109375, 1.5508438720703126, 1.5520716552734375, 1.5515863037109374, 1.55217919921875, 1.552343017578125, 1.5512073974609375, 1.5511490478515626, 1.5520225830078125, 1.551941650390625, 1.5513641357421875, 1.5520562744140625, 1.5527935791015626, 1.553005615234375, 1.5519703369140625, 1.5519405517578124, 1.55236962890625, 1.5524075927734375, 1.552400390625, 1.5521392822265625, 1.5526983642578125, 1.5521812744140624, 1.5518033447265625, 1.5511234130859375, 1.5530147705078126, 1.552041015625, 1.5515872802734374, 1.5513333740234374, 1.5513599853515625, 1.5520286865234374, 1.5513743896484375, 1.55210546875, 1.5510108642578124, 1.551393798828125, 1.55116845703125, 1.551873046875, 1.5514449462890625, 1.5518966064453126, 1.55174609375, 1.5518658447265625, 1.5518095703125, 1.5519150390625, 1.5517449951171876, 1.5523460693359374, 1.552300048828125, 1.552015380859375, 1.551193115234375, 1.5512535400390626, 1.5524495849609374, 1.55131494140625, 1.553142822265625, 3.20293701171875, 1.5507025146484374, 1.5517491455078125, 1.5505745849609376, 1.551394775390625, 1.5516416015625, 1.5522529296875, 1.5509012451171875, 1.551172607421875, 1.5507313232421875, 1.551066162109375, 1.5524403076171875, 1.552574462890625, 1.5519908447265625, 1.5519549560546875, 1.5521126708984374, 1.5522078857421875, 1.553606689453125, 1.552162841796875, 1.5526041259765626, 1.552289794921875, 1.5522764892578125, 1.55186376953125, 1.5506749267578126, 1.551815673828125, 1.5521044921875, 1.550876708984375, 1.5515504150390624, 1.550856201171875, 1.5514798583984375, 1.552759765625, 1.551753173828125, 1.5519580078125, 1.5519119873046876, 1.551141845703125, 1.551688720703125, 1.5519180908203125, 1.5516416015625, 1.5518433837890624, 1.5516488037109375, 1.55226416015625, 1.5521085205078125, 1.5519918212890624, 1.5524515380859376, 1.55057666015625, 1.5518023681640625, 1.5514920654296875, 1.5522559814453125, 1.551362060546875, 1.5525518798828124, 1.5522672119140626, 1.551783935546875, 1.551810546875, 1.5519447021484376, 1.5518751220703124, 1.5525919189453126, 1.5539844970703125, 1.5537889404296874, 1.5529072265625, 1.55158935546875, 1.55196826171875, 1.5523502197265624, 1.552722900390625]",tokens/s,0.6346973091049438,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Meta-Llama-3-8B,meta-llama/Meta-Llama-3-8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1612.562432,7598.505984,0.0,6952.05888,6314.304512,s,10,6.256621520996093,0.6256621520996093,0.00177274849590911,0.6261759033203125,0.6270674194335938,0.6281453430175781,0.6290076818847656,"[0.6292232666015625, 0.6262135009765625, 0.6265690307617188, 0.6241492919921875, 
0.6230667114257813, 0.6232349853515625, 0.624854736328125, 0.6261383056640625, 0.6263438110351562, 0.626827880859375]",tokens/s,409.16651125677043,kWh,7.363675038019816e-06,4.0350039173812114e-06,3.4213050246233903e-05,4.561172920163493e-05,tokens/kWh,5612591.420691497,MB,1612.562432,7598.505984,0.0,6952.05888,6464.047616,s,10,369.81373437499997,36.981373437500004,0.03993526863753842,36.962308593749995,37.0309203125,37.048473828125,37.062516640625,"[37.02584375, 37.06602734375, 37.02701953125, 36.95202734375, 36.96666796875, 36.9451328125, 36.9648984375, 36.9521875, 36.95971875, 36.9542109375]",tokens/s,1.7035603100699468,kWh,0.0004362355767852731,0.00023909502275260607,0.0019567083202673705,0.0026320389198052495,tokens/kWh,23935.816270019863,,s,629,374.855053039551,0.5959539793951523,0.07432782764438475,0.5866659545898437,0.589058251953125,0.5894471923828125,1.2107634912109375,"[0.586392578125, 0.5868226318359375, 0.5859829711914063, 0.5863291015625, 0.5865410766601562, 0.5872220458984375, 0.586271728515625, 0.5857536010742187, 0.5860116577148438, 0.585923583984375, 0.5870950317382813, 0.5866148071289062, 0.5868349609375, 0.5866342163085938, 0.5861621704101563, 0.586134521484375, 0.5858928833007813, 0.58690869140625, 0.586186767578125, 0.5873684692382812, 0.5866659545898437, 0.5863311157226563, 0.588506103515625, 0.5890938720703125, 0.5862461547851563, 0.5873582153320313, 0.5887150268554687, 0.5900718383789062, 0.5890580444335938, 0.589464599609375, 0.5892474975585937, 0.5888132934570313, 0.5892208862304688, 0.5892362060546875, 0.58881640625, 0.587177978515625, 0.5861478271484375, 0.5872230224609375, 0.5890713500976562, 0.5897615356445313, 0.5891819458007812, 0.5891215209960937, 0.588368896484375, 0.5868963623046874, 0.5877462768554688, 0.587673583984375, 0.5883515014648437, 0.5891358642578125, 0.5892372436523438, 0.5901947021484375, 0.5882511596679687, 0.5863966674804687, 0.5867079467773437, 0.5862789306640624, 0.5875824584960937, 0.589075439453125, 0.5904906005859375, 0.5885787963867187, 0.5882501220703125, 0.5885009765625, 0.5887518920898438, 0.5892843627929687, 1.21577880859375, 0.5866813354492187, 0.58638232421875, 0.5860689697265625, 0.586250244140625, 0.5862062377929688, 0.5863833618164063, 0.5862236328125, 0.5872701416015625, 0.5891921997070313, 0.5893621826171875, 0.5892301025390625, 0.5891163940429688, 0.589286376953125, 0.58905908203125, 0.5892198486328125, 0.5870059814453125, 0.58844775390625, 0.5890242309570313, 0.5887774658203125, 0.5890150146484375, 0.5887242431640625, 0.5887866821289063, 0.5895270385742187, 0.5885030517578125, 0.5896734619140624, 0.5887754516601562, 0.5896263427734375, 0.5871104125976563, 0.5881671752929688, 0.5891235961914062, 0.5891962890625, 0.5888031005859375, 0.5896325073242188, 0.5887764282226563, 0.5869895629882812, 0.5890662231445313, 0.5892987060546875, 0.5882552490234375, 0.5869424438476563, 0.588790771484375, 0.5888245849609375, 0.5896591186523438, 0.5890518798828125, 0.589486083984375, 0.5889976196289063, 0.5883146362304688, 0.58652978515625, 0.5864765625, 0.5863751831054688, 0.586809326171875, 0.5878477172851563, 0.5889166870117187, 0.589169677734375, 0.5894891357421875, 0.5896304931640625, 0.5890713500976562, 0.5871114501953125, 0.5880750122070313, 0.5888890991210938, 0.588980224609375, 0.588822509765625, 0.588010498046875, 1.2160072021484376, 0.5894573974609375, 0.5897778930664063, 0.5893355712890626, 0.589739013671875, 0.5899202270507813, 0.5896151123046875, 0.58777294921875, 0.5865891723632812, 0.5862645874023438, 0.5884047241210938, 
0.5887549438476563, 0.5893632202148438, 0.587030517578125, 0.589475830078125, 0.5881303100585937, 0.588000244140625, 0.5885706176757812, 0.5890109252929687, 0.5895772094726562, 0.5876244506835937, 0.5894738159179688, 0.58935400390625, 0.5891471557617187, 0.589496337890625, 0.5882972412109375, 0.5862020874023437, 0.5883658447265625, 0.58893310546875, 0.589431884765625, 0.5892074584960938, 0.5897041625976562, 0.5888942260742187, 0.5887662353515625, 0.589075439453125, 0.5871063232421875, 0.5874237670898438, 0.586166259765625, 0.5869127807617187, 0.5871524047851563, 0.5860792236328125, 0.5860894775390625, 0.5864212646484375, 0.5861089477539062, 0.5861775512695313, 0.586134521484375, 0.586650634765625, 0.586608642578125, 0.5868236694335938, 0.5863117065429687, 0.5860730590820312, 0.5861652221679687, 0.5863720703125, 0.587325439453125, 0.5869486083984375, 0.5867796630859375, 0.586829833984375, 0.5859512329101563, 0.586598388671875, 0.5858897705078125, 0.5857484741210938, 0.5861038208007813, 0.5859328002929688, 1.21084521484375, 0.5868482666015625, 0.58640283203125, 0.5865072631835937, 0.58753125, 0.5866895141601562, 0.5865697021484375, 0.5866485595703125, 0.5865379638671875, 0.5872005004882812, 0.5859409790039063, 0.5867479248046875, 0.5860208740234375, 0.5864622192382812, 0.5861642456054688, 0.5862113037109375, 0.5866629028320313, 0.5866127319335938, 0.5868892211914063, 0.5860433959960938, 0.5860228881835937, 0.585712646484375, 0.5860034790039063, 0.5866076049804687, 0.5860700073242188, 0.5865840454101563, 0.5865113525390625, 0.5863505859375, 0.5864140625, 0.5859491577148438, 0.586281982421875, 0.5866659545898437, 0.5862092895507812, 0.5860966186523437, 0.586608642578125, 0.5861068725585937, 0.5864765625, 0.585987060546875, 0.5858989868164063, 0.5861160888671875, 0.587757568359375, 0.586156005859375, 0.5871318969726562, 0.5865809936523437, 0.5864468383789062, 0.5861632080078125, 0.5858989868164063, 0.586124267578125, 0.5864806518554687, 0.5860403442382812, 0.5868748779296875, 0.586625, 0.5871697998046875, 0.586534912109375, 0.5867571411132813, 0.5872445678710938, 0.5864386596679687, 0.5876326293945312, 0.588031982421875, 0.5878978271484375, 0.5873828735351563, 0.5863617553710937, 0.5862686767578125, 1.20973828125, 0.5866116943359375, 0.5870663452148438, 0.5871646728515625, 0.586650634765625, 0.5869865112304687, 0.586355712890625, 0.5865973510742187, 0.5867642822265625, 0.5863895263671876, 0.586640380859375, 0.5864683227539063, 0.5873387451171875, 0.587441162109375, 0.5863936157226562, 0.5867008056640625, 0.5865328369140625, 0.5866751708984375, 0.5866547241210938, 0.5867018432617187, 0.5871431884765625, 0.5867120361328125, 0.5885173950195313, 0.58661376953125, 0.5860853881835938, 0.5864765625, 0.5864909057617187, 0.587452392578125, 0.587109375, 0.5869700927734375, 0.5880678100585938, 0.5873899536132813, 0.5867468872070313, 0.5864356079101563, 0.5865267333984375, 0.586555419921875, 0.5870673828125, 0.5869865112304687, 0.5871329345703125, 0.5876347045898438, 0.5866997680664062, 0.586439697265625, 0.5864110107421875, 0.5860280151367188, 0.586935302734375, 0.5874688110351562, 0.5872630004882813, 0.5882040405273438, 0.5861519165039063, 0.5861754760742187, 0.5864959716796875, 0.5864253540039063, 0.5865482177734375, 0.586439697265625, 0.58705615234375, 0.586955810546875, 0.5866649169921875, 0.586872802734375, 0.5867396850585938, 0.5856685791015624, 0.585691162109375, 0.5862932739257812, 0.5861509399414062, 1.2105533447265624, 0.5868963623046874, 0.5862307739257813, 0.5858088989257813, 
0.5863147583007813, 0.5863117065429687, 0.5865164794921875, 0.5869424438476563, 0.5864171752929688, 0.5864099731445312, 0.5860372314453125, 0.5859174194335938, 0.5860126953125, 0.5862420654296875, 0.5864284057617187, 0.5860905151367187, 0.5861795654296875, 0.5876797485351563, 0.5864632568359375, 0.5860034790039063, 0.5856358642578126, 0.5858897705078125, 0.5856849975585937, 0.5860556640625, 0.5864069213867188, 0.5858795776367187, 0.5864683227539063, 0.5857034301757813, 0.586197998046875, 0.5860546264648437, 0.5868114013671875, 0.5869946899414062, 0.586625, 0.5865584716796876, 0.5874636840820312, 0.5859491577148438, 0.586603515625, 0.5859368896484375, 0.5866004638671874, 0.585970703125, 0.5859840087890625, 0.5862932739257812, 0.5863259887695312, 0.58724658203125, 0.586335205078125, 0.5863598022460937, 0.5860515747070313, 0.5862676391601562, 0.586872802734375, 0.5865533447265625, 0.5867427978515625, 0.5866065673828125, 0.5869660034179688, 0.5869803466796875, 0.5866997680664062, 0.5868963623046874, 0.5864785766601562, 0.58616015625, 0.5860546264648437, 0.5872752685546875, 0.5869076538085938, 0.5865799560546875, 0.586840087890625, 1.211283447265625, 0.5869813842773437, 0.5866997680664062, 0.5866414184570312, 0.587345947265625, 0.5873807373046875, 0.5871063232421875, 0.5870315551757812, 0.5875353393554688, 0.58697216796875, 0.5863075561523438, 0.586588134765625, 0.5869383544921875, 0.587821044921875, 0.5868165283203125, 0.5868810424804688, 0.5863833618164063, 0.5862512817382812, 0.5872967529296875, 0.5871339721679687, 0.5874821166992188, 0.5877125244140625, 0.5862809448242188, 0.5868349609375, 0.58678271484375, 0.5863117065429687, 0.5862789306640624, 0.586708984375, 0.5875824584960937, 0.5870919799804688, 0.5866690673828125, 0.586181640625, 0.5861775512695313, 0.5859921875, 0.5867694091796875, 0.58703564453125, 0.5866270751953125, 0.586419189453125, 0.5869496459960938, 0.5864642333984375, 0.5868062744140625, 0.5862686767578125, 0.5867919311523437, 0.5862195434570312, 0.58638232421875, 0.5867694091796875, 0.5868318481445313, 0.586829833984375, 0.5861826782226562, 0.586302490234375, 0.5865001220703125, 0.5866393432617187, 0.5861959838867188, 0.5873704833984374, 0.5870489501953124, 0.5865164794921875, 0.5868236694335938, 0.5866116943359375, 0.58705712890625, 0.5863731079101563, 0.58650830078125, 0.5867161865234375, 0.5862952880859374, 1.2128675537109375, 0.586335205078125, 0.5866659545898437, 0.5865216064453125, 0.586155029296875, 0.5867110595703126, 0.587025390625, 0.5866659545898437, 0.5868226318359375, 0.5863649291992188, 0.5859788818359375, 0.5858211669921874, 0.5860321044921875, 0.5860249633789063, 0.5860086059570313, 0.5864939575195313, 0.586608642578125, 0.586387451171875, 0.586119140625, 0.5861437377929688, 0.5861488647460937, 0.586313720703125, 0.5860751342773437, 0.5867427978515625, 0.5866076049804687, 0.5862164306640625, 0.5868421020507812, 0.5864365844726562, 0.5864765625, 0.5864837036132813, 0.5860372314453125, 0.586925048828125, 0.5860485229492187, 0.58665673828125, 0.5867642822265625, 0.5860065307617187, 0.5863987426757813, 0.586060791015625, 0.586392578125, 0.5863106689453125, 0.5877749633789062, 0.58640283203125, 0.5868861694335937, 0.5868216552734375, 0.5870684204101563, 0.586323974609375, 0.5864427490234375, 0.5864703979492187, 0.5862952880859374, 0.586693603515625, 0.5876336669921876, 0.5873316040039063, 0.5868585205078125, 0.58693017578125, 0.5867694091796875, 0.5865830688476562, 0.586429443359375, 0.5870018310546875, 0.5871503295898437, 0.5864939575195313, 0.5877985229492187, 
0.586144775390625, 0.5863424072265625, 1.2129935302734376, 0.5862727661132813, 0.5863915405273438, 0.5866475219726562, 0.5876971435546875, 0.5864058837890626, 0.5863065795898438, 0.5865758666992188, 0.5865635986328125, 0.5862164306640625, 0.5869281005859375, 0.586840087890625, 0.5862543334960938, 0.5875742797851562, 0.5865205688476562, 0.5862706909179688, 0.5863649291992188, 0.5861734619140625, 0.5863147583007813, 0.5860198364257813, 0.58655126953125, 0.5865543823242187, 0.58614990234375, 0.5865379638671875, 0.5857679443359375, 0.5859266357421875, 0.58583447265625, 0.5874698486328125, 0.5878701782226563, 0.5867161865234375, 0.5876551513671875, 0.5866875, 0.587071533203125, 0.5868062133789063, 0.586630126953125, 0.5871063232421875, 0.5867059326171875, 0.5871452026367188, 0.5867632446289063, 0.587125732421875, 0.5870151977539062, 0.5868308715820313, 0.5868052368164063, 0.5859778442382813, 0.5877913818359375, 0.5867008056640625, 0.5866905517578125, 0.58863818359375, 0.5863485717773438, 0.5870551147460937, 0.5870684204101563, 0.586407958984375, 0.58616015625, 0.5859154052734376, 0.5867008056640625, 0.5863854370117187, 0.5862307739257813, 0.5860966186523437, 0.5860198364257813, 0.5865277709960938, 0.5865287475585937, 0.5874104614257812, 0.586376220703125, 1.215204345703125, 0.586377197265625, 0.5866434326171875, 0.586534912109375, 0.5863895263671876, 0.5870858154296875, 0.5862635498046875, 0.5872025756835938, 0.586982421875, 0.5865861206054688, 0.5865164794921875, 0.586608642578125, 0.5866710815429688, 0.5860003662109375, 0.5871441650390625, 0.5867550659179688, 0.586144775390625, 0.5870264282226563, 0.5859225463867187, 0.5859358520507812, 0.5861325073242187, 0.5858846435546875, 0.58669873046875, 0.5869383544921875, 0.5876326293945312, 0.587109375, 0.5869526977539062, 0.5863679809570312, 0.5862573852539062, 0.5862011108398437, 0.5859225463867187, 0.586871826171875, 0.5864949951171875, 0.5862635498046875, 0.5871820678710937, 0.5862850341796875, 0.5865062255859375, 0.5861990356445312, 0.5862440795898437, 0.5863117065429687, 0.58606591796875, 0.5864724731445312, 0.5863618774414062, 0.5869076538085938, 0.5863720703125, 0.5866680297851562, 0.5867079467773437, 0.5861099243164063, 0.5866577758789062, 0.5864990844726562, 0.5863792724609375, 0.5876592407226563, 0.586555419921875, 0.5861253051757812, 0.5861632080078125, 0.586450927734375, 0.5867694091796875, 0.5861539916992188, 0.5869639892578125, 0.58669775390625, 0.5874268188476562, 0.586555419921875, 0.5859317626953126]",tokens/s,1.6779819156756426,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-350m,facebook/opt-350m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1308.95872,1044.905984,0.0,398.45888,290.479104,s,10,0.7298112030029297,0.07298112030029298,0.0019203205238493336,0.07210595321655273,0.07568494338989258,0.07613730201721192,0.07649918891906739,"[0.07658966064453125, 0.07404208374023437, 0.0716014404296875, 0.07261046600341797, 0.07435145568847656, 0.07145033264160157, 0.07128550720214843, 
0.07136418914794922, 0.07093164825439453, 0.07558441925048828]",tokens/s,3507.7565121862385,kWh,8.692573962940111e-07,4.763112833442766e-07,2.233504646279191e-06,3.579073325917479e-06,tokens/kWh,71526894.44672821,MB,1309.261824,1044.905984,0.0,398.45888,337.28256,s,10,45.10692724609375,4.510692724609375,0.02785183557667033,4.507988037109375,4.524690771484375,4.5543905517578125,4.578150375976563,"[4.4999814453125, 4.51792333984375, 4.49663525390625, 4.50728076171875, 4.5086953125, 4.47609033203125, 4.5180908203125, 4.4837802734375, 4.514359375, 4.58409033203125]",tokens/s,13.966812604255988,kWh,5.335015537424221e-05,2.9239079429191457e-05,0.0001294696562717161,0.00021205889107514978,tokens/kWh,297087.2840114681,,s,629,45.688359863281214,0.07263650216737877,0.008731184173797845,0.07209983825683594,0.0731078628540039,0.07340298156738281,0.14137104125976563,"[0.07185408020019532, 0.07194009399414063, 0.07217356872558593, 0.07269068908691406, 0.07360717010498047, 0.0735498275756836, 0.07288934326171875, 0.07259142303466797, 0.07237420654296875, 0.07188172912597657, 0.07213568115234376, 0.0719452133178711, 0.07213875579833984, 0.07207730865478515, 0.07207014465332032, 0.0719257583618164, 0.06970162963867188, 0.06967910766601562, 0.0696995849609375, 0.07190425872802735, 0.07216028594970703, 0.07225545501708984, 0.07087206268310547, 0.06890290832519531, 0.06927776336669922, 0.06949574279785156, 0.06937395477294922, 0.06961663818359375, 0.06992694091796875, 0.07223088073730469, 0.07267635345458984, 0.07262620544433594, 0.07189091491699219, 0.07197593688964844, 0.07230976104736328, 0.07216025543212891, 0.07246540832519531, 0.07132262420654296, 0.07190016174316406, 0.07246745300292969, 0.07053619384765625, 0.06935552215576171, 0.06963097381591797, 0.07147932434082031, 0.06940361785888671, 0.0716410903930664, 0.07243264007568359, 0.07219302368164063, 0.07221145629882812, 0.07042355346679688, 0.06942002868652344, 0.06980608367919922, 0.06986752319335937, 0.07069696044921875, 0.0722165756225586, 0.0733306884765625, 0.07286374664306641, 0.07210086059570313, 0.07077069091796875, 0.07022796630859375, 0.07062940979003907, 0.07200457763671875, 0.14657945251464843, 0.07262617492675781, 0.07222579193115235, 0.07230054473876953, 0.07205580902099609, 0.07209779357910157, 0.07216537475585938, 0.07003648376464844, 0.07229849243164063, 0.07284019470214843, 0.07227597045898437, 0.07243981170654297, 0.07234457397460937, 0.07199948883056641, 0.07222169494628906, 0.07268147277832031, 0.07375667572021484, 0.07214591979980468, 0.07202201843261719, 0.07222271728515625, 0.07206604766845703, 0.0720025634765625, 0.07187251281738281, 0.0722913589477539, 0.072248291015625, 0.07194624328613282, 0.07201999664306641, 0.07205987548828124, 0.07217356872558593, 0.0712837142944336, 0.06940569305419922, 0.06932991790771484, 0.06942310333251953, 0.06952140808105468, 0.06934835052490235, 0.06939238739013671, 0.06940774536132813, 0.06954188537597657, 0.06909951782226563, 0.0694824981689453, 0.06964530944824218, 0.06932991790771484, 0.07233843231201172, 0.07286271667480469, 0.06985113525390625, 0.06934528350830078, 0.07116902160644531, 0.0729722900390625, 0.0716072998046875, 0.07257907104492188, 0.0729917449951172, 0.07322112274169922, 0.07285657501220703, 0.07319859313964844, 0.07252787017822265, 0.07272038269042969, 0.07294668579101563, 0.0729886703491211, 0.07285350036621094, 0.07272959899902344, 0.07295283508300782, 0.07268556976318359, 0.07238143920898438, 0.1415720977783203, 0.0693934097290039, 0.06967814636230468, 
0.06940972900390625, 0.06931148529052734, 0.06929817962646484, 0.06965049743652343, 0.0701173095703125, 0.07237427520751953, 0.07200054168701171, 0.07237423706054688, 0.07208857727050781, 0.07215309143066406, 0.07220838165283203, 0.07202713775634766, 0.0719810562133789, 0.07221862030029297, 0.07206092834472656, 0.07223705291748046, 0.07211007690429687, 0.07236300659179687, 0.07231590270996094, 0.07095603179931641, 0.0694814682006836, 0.06937395477294922, 0.06936780548095703, 0.07176601409912109, 0.07184486389160157, 0.0720343017578125, 0.07231897735595703, 0.07199129486083984, 0.07193299102783203, 0.0723834228515625, 0.07228825378417969, 0.07213260650634766, 0.07228108978271484, 0.07208448028564453, 0.07200153350830078, 0.07205990600585938, 0.07209983825683594, 0.07197491455078125, 0.07196057891845703, 0.0720547866821289, 0.073133056640625, 0.07183769226074219, 0.07257807922363281, 0.0721899185180664, 0.07195340728759765, 0.0722677764892578, 0.07214284515380859, 0.07201487731933594, 0.06975177764892578, 0.06966681671142579, 0.06874931335449219, 0.06921011352539062, 0.06935346984863282, 0.06987059020996093, 0.07226265716552735, 0.07209164428710937, 0.07197081756591797, 0.07196057891845703, 0.0713338851928711, 0.07061196899414063, 0.1456865234375, 0.0722279052734375, 0.07271212768554687, 0.07285043334960938, 0.0725432357788086, 0.07223500823974609, 0.0720865249633789, 0.07014915466308594, 0.06968726348876954, 0.06959820556640625, 0.06964019012451172, 0.0694681625366211, 0.07034880065917969, 0.07004876708984376, 0.06967193603515626, 0.06940160369873047, 0.07108914947509766, 0.07227597045898437, 0.07151821136474609, 0.06967501068115234, 0.06957772827148437, 0.0698071060180664, 0.07123967742919922, 0.07210086059570313, 0.07223603057861328, 0.07214284515380859, 0.07249510192871093, 0.07254835510253907, 0.07294361877441406, 0.07138508605957031, 0.07067545318603516, 0.07234969329833985, 0.07242546844482421, 0.07169741058349609, 0.07204966735839843, 0.06960543823242188, 0.06955718231201172, 0.07074406433105469, 0.07213772583007813, 0.07239683532714844, 0.07193702697753906, 0.07239676666259766, 0.0722012176513672, 0.07219200134277344, 0.07279411315917969, 0.07149260711669922, 0.07210393524169922, 0.07228211212158203, 0.07218380737304687, 0.07210598754882812, 0.07205171203613281, 0.07216435241699219, 0.07028326416015625, 0.06974668884277344, 0.07239577484130859, 0.07243673706054687, 0.07223094177246094, 0.07230358123779297, 0.07244390106201172, 0.07284838104248047, 0.07209062194824219, 0.072163330078125, 0.07201894378662109, 0.14699827575683594, 0.07176294708251953, 0.0707747802734375, 0.0720343017578125, 0.0722135009765625, 0.07226268768310547, 0.0720823974609375, 0.07234976196289063, 0.07255648040771484, 0.07198515319824218, 0.07197798156738282, 0.07256678771972656, 0.07222476959228516, 0.07208755493164062, 0.07251971435546875, 0.07147618865966797, 0.06935346984863282, 0.06952550506591797, 0.06944051361083985, 0.06955213165283203, 0.06944255828857422, 0.06934323120117188, 0.07061299133300782, 0.07203533172607422, 0.07238861083984376, 0.07203225708007813, 0.07213568115234376, 0.0720374755859375, 0.07249091339111328, 0.0709017562866211, 0.06946304321289062, 0.07154790496826172, 0.07224217224121093, 0.07243673706054687, 0.07215615844726563, 0.07217459106445312, 0.07221247863769531, 0.07201894378662109, 0.07098880004882813, 0.06991667175292969, 0.07119564819335937, 0.07190630340576172, 0.0721786880493164, 0.07199334716796875, 0.06973235321044922, 0.06972415924072266, 0.06972930908203125, 
0.06958694458007812, 0.06966268920898437, 0.07221145629882812, 0.07257907104492188, 0.07218688201904297, 0.07213772583007813, 0.07218380737304687, 0.07208345794677734, 0.07205068969726562, 0.07378431701660157, 0.07249612426757812, 0.07221862030029297, 0.0720650863647461, 0.07227897644042969, 0.07233740997314453, 0.07240601348876953, 0.1438771514892578, 0.07196463775634766, 0.07234867095947266, 0.07229952239990234, 0.07298047637939453, 0.07293030548095703, 0.07180902099609375, 0.07185408020019532, 0.07234047698974609, 0.07194316864013672, 0.07218278503417969, 0.07320985412597657, 0.07245516967773437, 0.07180287933349609, 0.07000985717773438, 0.0729917449951172, 0.07227187347412109, 0.07225958251953125, 0.07234047698974609, 0.07209677124023438, 0.07210393524169922, 0.07231283569335938, 0.07199436950683594, 0.07164825439453125, 0.06944358062744141, 0.0694302749633789, 0.06980812835693359, 0.06952652740478515, 0.06984703826904297, 0.07122022247314454, 0.07212850952148438, 0.06986752319335937, 0.06965248107910156, 0.06977843475341797, 0.06978047943115234, 0.06932173156738282, 0.0697364501953125, 0.0695572509765625, 0.06944153594970703, 0.07092131042480469, 0.07262095642089844, 0.07213362884521485, 0.0723927001953125, 0.0719974365234375, 0.07194422149658203, 0.0721714859008789, 0.07187967681884766, 0.0721244125366211, 0.07195852661132812, 0.07079424285888672, 0.06962790679931641, 0.06972518157958985, 0.06952345275878906, 0.06946918487548828, 0.06943539428710938, 0.06939238739013671, 0.06983372497558593, 0.06957260894775391, 0.06938521575927735, 0.06923776245117187, 0.07017164611816407, 0.06932582092285157, 0.06944461059570313, 0.1413570556640625, 0.06928076934814453, 0.07196774291992188, 0.0723220443725586, 0.07226573181152343, 0.07199231719970703, 0.07206502532958985, 0.07222476959228516, 0.07213875579833984, 0.06940057373046875, 0.06947840118408204, 0.07286784362792968, 0.07213260650634766, 0.0724295654296875, 0.07200563049316407, 0.07263334655761719, 0.07273677062988282, 0.07225856018066407, 0.0693411865234375, 0.0695367660522461, 0.07020543670654297, 0.07206809234619141, 0.07203020477294922, 0.0722001953125, 0.07224012756347656, 0.07218688201904297, 0.07218694305419922, 0.07206291198730469, 0.07192678070068359, 0.0715857925415039, 0.07136051177978515, 0.0714076156616211, 0.07242240142822266, 0.07221453094482422, 0.07244499206542969, 0.07197689819335938, 0.07119974517822265, 0.07206297302246094, 0.0721981430053711, 0.0724316177368164, 0.07192473602294921, 0.07172608184814454, 0.06988082885742188, 0.06962483215332031, 0.07035497283935546, 0.06945276641845703, 0.06980198669433593, 0.07191449737548829, 0.07256678771972656, 0.07234457397460937, 0.07288422393798828, 0.07266918182373047, 0.07230668640136718, 0.07237324523925781, 0.0723978271484375, 0.07207526397705079, 0.07200057220458984, 0.07223903656005859, 0.07222172546386718, 0.07205680084228516, 0.0722012176513672, 0.07250841522216797, 0.07297948455810546, 0.14137648010253906, 0.06947328186035157, 0.069607421875, 0.06972518157958985, 0.07188377380371094, 0.07261798095703124, 0.07253196716308594, 0.07225138854980469, 0.0723763198852539, 0.07227289581298828, 0.0724869155883789, 0.0720404510498047, 0.07224832153320312, 0.0722790756225586, 0.07235478210449219, 0.07247462463378906, 0.07243059539794922, 0.07224217224121093, 0.07231999969482422, 0.07176908874511718, 0.07236198425292968, 0.07177011108398437, 0.06941900634765626, 0.0709969940185547, 0.07273983764648438, 0.07211827087402344, 0.07241318511962891, 0.07507456207275391, 
0.07245209503173829, 0.07226681518554688, 0.07216531372070313, 0.07257190704345703, 0.07180902099609375, 0.07219200134277344, 0.07004978942871094, 0.06961151885986328, 0.06961663818359375, 0.06986041259765625, 0.06977324676513671, 0.06963404846191407, 0.06956646728515625, 0.06976614379882813, 0.06975692749023438, 0.06940774536132813, 0.07025663757324219, 0.06947020721435547, 0.07162265777587891, 0.07189810943603515, 0.07244185638427734, 0.07230054473876953, 0.07227497863769532, 0.07341053009033204, 0.07264460754394532, 0.07066521453857422, 0.06972415924072266, 0.0694620132446289, 0.06928281402587891, 0.06948761749267578, 0.0691251220703125, 0.06890598297119141, 0.06905343627929687, 0.06960025787353516, 0.06945689392089843, 0.14089112854003907, 0.0694128646850586, 0.069570556640625, 0.06914662170410156, 0.06950399780273438, 0.07003545379638672, 0.06975794982910156, 0.06944870758056641, 0.070181884765625, 0.06986444854736328, 0.069607421875, 0.0728453140258789, 0.07321395111083985, 0.07279718780517579, 0.07399935913085938, 0.07330099487304688, 0.07279718780517579, 0.07354777526855469, 0.07338188934326172, 0.073301025390625, 0.07308284759521484, 0.0729917449951172, 0.07306854248046875, 0.0727388153076172, 0.07293440246582031, 0.0727357406616211, 0.07297023773193359, 0.07296102142333985, 0.07303270721435547, 0.07281462097167969, 0.07316886138916015, 0.073059326171875, 0.0725074234008789, 0.07300093078613282, 0.07273785400390625, 0.07161849975585938, 0.06959820556640625, 0.06925823974609376, 0.06970883178710938, 0.06981629180908203, 0.06969036865234375, 0.06949683380126953, 0.06930738830566406, 0.06938419342041016, 0.06971298980712891, 0.0707419204711914, 0.07269376373291016, 0.07251353454589844, 0.07328562927246093, 0.06910873413085937, 0.06949581146240234, 0.06906163024902344, 0.07005286407470702, 0.07312281799316406, 0.07338393402099609, 0.07292108917236328, 0.07231078338623047, 0.07324467468261718, 0.07336345672607422, 0.07308595275878907, 0.07299993896484375, 0.07279001617431641, 0.07310745239257813, 0.14934938049316407, 0.07343206024169922, 0.07323033905029297, 0.07366553497314453, 0.07251865386962891, 0.07295795440673829, 0.07302963256835937, 0.073480224609375, 0.07338902282714843, 0.07340748596191406, 0.0733655014038086, 0.07314534759521485, 0.07351602935791016, 0.07292825317382813, 0.07283715057373047, 0.07243465423583985, 0.07391846466064453, 0.07298047637939453, 0.07312384033203125, 0.07325183868408203, 0.07271321868896484, 0.07309209442138671, 0.07304192352294922, 0.07337062072753907, 0.0729354248046875, 0.07303884887695312, 0.07325081634521484, 0.0731668472290039, 0.07339622497558594, 0.07318016052246094, 0.07310028839111328, 0.07337574768066406, 0.07324774169921874, 0.07300300598144531, 0.07360620880126953, 0.0735692138671875, 0.07393798065185547, 0.07294355010986328, 0.07357542419433594, 0.07405875396728516, 0.07427174377441406, 0.07364198303222656, 0.07310950469970703, 0.07334912109375, 0.0739277114868164, 0.07357746887207031, 0.07303472137451172, 0.07309414672851562, 0.07331635284423828, 0.07301529693603516, 0.0722135009765625, 0.07023104095458985, 0.07103794860839843, 0.06991667175292969, 0.0706355209350586, 0.0701685791015625, 0.06987264251708984, 0.0711445083618164, 0.0702627182006836, 0.0697343978881836, 0.06988800048828125, 0.07153561401367188, 0.07306034851074218]",tokens/s,13.767182754693582,,,,, 
4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-rw-1b,tiiuae/falcon-rw-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.1,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1513.762816,1825.046528,0.0,1178.599424,1091.247104,s,10,1.2350289306640625,0.12350289306640624,0.0006278590763107522,0.12335015869140625,0.12446991119384765,0.12452100448608398,0.12456187911987306,"[0.12457209777832032, 0.12258493041992187, 0.123223388671875, 0.12296672058105469, 0.12311740875244141, 0.12295513916015625, 0.1234769287109375, 0.12388646697998047, 0.12445855712890624, 0.12378729248046876]",tokens/s,2072.8259366551956,kWh,1.4517080513280905e-06,7.954704248798562e-07,6.551292503901485e-06,8.798470980109433e-06,tokens/kWh,29095964.580520324,MB,1513.762816,1825.046528,0.0,1178.599424,1159.734784,s,10,69.32218505859375,6.932218505859375,0.003149777960520474,6.933029541015625,6.935241162109374,6.935911108398438,6.936447065429688,"[6.93331884765625, 6.93092578125, 6.9333740234375, 6.9281669921875, 6.932740234375, 6.934138671875, 6.93509228515625, 6.93250244140625, 6.9253447265625, 6.9365810546875]",tokens/s,9.087999743047627,kWh,8.183205577456532e-05,4.4849814286347e-05,0.00036005222300090266,0.000486734093061815,tokens/kWh,129434.12203508627,,s,629,70.322675743103,0.11180075634833549,0.014707892425374562,0.10997555541992188,0.11031490783691406,0.11055718231201171,0.23335628784179688,"[0.10998989105224609, 0.11027967834472656, 0.11003801727294922, 0.11055718231201171, 0.11002572631835937, 0.10997555541992188, 0.10988441467285157, 0.11009740447998047, 0.10994892883300782, 0.1100421142578125, 0.10996940612792969, 0.11002470397949218, 0.11002470397949218, 0.11015475463867187, 0.10990592193603516, 0.10996428680419922, 0.10994073486328125, 0.10987417602539062, 0.10995712280273437, 0.10995814514160156, 0.1099161605834961, 0.10992947387695312, 0.11009024047851562, 0.1098045425415039, 0.1099530258178711, 0.10990898895263672, 0.10999091339111328, 0.1099161605834961, 0.10990386962890625, 0.10983321380615234, 0.10983936309814453, 0.10992025756835938, 0.10994380950927735, 0.11009740447998047, 0.11012198638916015, 0.11004415893554688, 0.11021311950683593, 0.11015270233154296, 0.10997555541992188, 0.10997042846679687, 0.1100769271850586, 0.10993254089355468, 0.11002880096435547, 0.10997248077392578, 0.1100738525390625, 0.10994585418701172, 0.10987519836425781, 0.11011788940429687, 0.11002674865722656, 0.11031449890136719, 0.11004313659667969, 0.10998271942138672, 0.1100400619506836, 0.11014860534667968, 0.11063295745849609, 0.10996736145019531, 0.10992845153808593, 0.10991410827636719, 0.11014860534667968, 0.1098608627319336, 0.11007590484619141, 0.10992332458496094, 0.23466700744628907, 0.10985779571533204, 0.10981171417236328, 0.1099315185546875, 0.10995507049560548, 0.10985472106933594, 0.10984754943847656, 0.10977689361572265, 0.10997862243652344, 0.1100738525390625, 0.11007488250732422, 0.10998169708251954, 0.11045171356201172, 0.11078348541259765, 0.11011686706542968, 0.10994380950927735, 0.10991513824462891, 0.10980556488037109, 
0.10991718292236329, 0.10992332458496094, 0.10974310302734375, 0.10984857940673828, 0.10985164642333985, 0.10992947387695312, 0.11002572631835937, 0.11035648345947266, 0.11005951690673828, 0.10994073486328125, 0.10990694427490234, 0.1097676773071289, 0.10987827301025391, 0.10998374176025391, 0.10983628845214843, 0.10985164642333985, 0.10997555541992188, 0.11007590484619141, 0.10993049621582031, 0.10982195281982422, 0.10988543701171875, 0.11021414184570312, 0.11008614349365234, 0.11017420959472657, 0.10999193572998046, 0.10999603271484375, 0.1100400619506836, 0.10992127990722657, 0.11122073364257813, 0.10994790649414063, 0.1098946533203125, 0.10998067474365235, 0.11016397094726563, 0.11005030059814454, 0.10995097351074219, 0.10986803436279297, 0.10994483184814453, 0.11000115203857422, 0.10993663787841797, 0.1098629150390625, 0.10973286437988282, 0.11027046203613282, 0.10998169708251954, 0.11012403106689453, 0.11069951629638672, 0.2334771270751953, 0.109949951171875, 0.10998374176025391, 0.109949951171875, 0.11041484832763672, 0.11017215728759766, 0.11003084564208984, 0.1100421142578125, 0.11011174774169921, 0.11004927825927735, 0.10999603271484375, 0.11010457611083985, 0.10996121978759765, 0.1101322250366211, 0.10993869018554688, 0.11010150146484375, 0.11002470397949218, 0.1104005126953125, 0.11045171356201172, 0.11004927825927735, 0.11002982330322265, 0.11002470397949218, 0.10999193572998046, 0.10997042846679687, 0.10986803436279297, 0.1099315185546875, 0.10996018981933593, 0.11003699493408203, 0.10990592193603516, 0.11009228515625, 0.10992537689208984, 0.11011686706542968, 0.11002572631835937, 0.10996121978759765, 0.1100738525390625, 0.11009945678710938, 0.1099683837890625, 0.10994278717041016, 0.11000012969970703, 0.10996940612792969, 0.11009638214111328, 0.11007590484619141, 0.10986803436279297, 0.11006259155273437, 0.10995507049560548, 0.10999501037597656, 0.10975743865966797, 0.11042304229736329, 0.11009535980224609, 0.1101465606689453, 0.11001651000976563, 0.11085004425048828, 0.11017932891845703, 0.11005542755126953, 0.11006463623046875, 0.10995097351074219, 0.10983526611328125, 0.11001036834716797, 0.10994278717041016, 0.11001856231689452, 0.11001139068603516, 0.11006976318359375, 0.1099315185546875, 0.23315866088867188, 0.10991718292236329, 0.10996428680419922, 0.10990898895263672, 0.10994687652587891, 0.10980659484863281, 0.11001344299316407, 0.10984243011474609, 0.1099192352294922, 0.10988441467285157, 0.10989568328857421, 0.10988543701171875, 0.11012300872802734, 0.10975027465820313, 0.10980352020263671, 0.10992742156982421, 0.10989055633544922, 0.10984754943847656, 0.10997657775878907, 0.10991001892089844, 0.10981273651123047, 0.109844482421875, 0.10982809448242188, 0.1099530258178711, 0.11010150146484375, 0.110129150390625, 0.10988031768798828, 0.10981785583496094, 0.10985779571533204, 0.10993561553955078, 0.10999910736083984, 0.10990694427490234, 0.10990796661376953, 0.10995097351074219, 0.11003801727294922, 0.10986803436279297, 0.10986393737792968, 0.10978406524658203, 0.10994175720214844, 0.10994585418701172, 0.11002674865722656, 0.1098967056274414, 0.10996121978759765, 0.10992947387695312, 0.11166207885742188, 0.11004415893554688, 0.11000012969970703, 0.10999807739257812, 0.10993459320068359, 0.11005235290527343, 0.10998169708251954, 0.11006668853759766, 0.11001651000976563, 0.11010150146484375, 0.11001548767089844, 0.10998579406738282, 0.10998271942138672, 0.1099653091430664, 0.10990386962890625, 0.11008306884765626, 0.10987110137939453, 0.10999807739257812, 
0.10995916748046874, 0.23322726440429686, 0.10992332458496094, 0.11020185852050782, 0.11035545349121094, 0.10995814514160156, 0.10994175720214844, 0.10995507049560548, 0.10988441467285157, 0.10981273651123047, 0.10998374176025391, 0.10985574340820313, 0.10997452545166016, 0.10989772796630859, 0.10986803436279297, 0.11038003540039062, 0.11002880096435547, 0.11003494262695312, 0.11040563201904297, 0.1099653091430664, 0.11018342590332031, 0.11008819580078125, 0.11007488250732422, 0.10995916748046874, 0.10976461029052734, 0.11010047912597656, 0.1102008285522461, 0.11007488250732422, 0.10994483184814453, 0.11033497619628906, 0.11017727661132813, 0.1098629150390625, 0.10993561553955078, 0.11003699493408203, 0.11001241302490235, 0.11000627136230469, 0.10999295806884765, 0.11002162933349609, 0.11002265930175781, 0.10992230224609376, 0.10989158630371093, 0.1097359390258789, 0.10989977264404296, 0.11002674865722656, 0.10999910736083984, 0.10985779571533204, 0.1099683837890625, 0.10974720001220703, 0.109876220703125, 0.10991001892089844, 0.10994790649414063, 0.10997452545166016, 0.11035135650634766, 0.11020697784423829, 0.11008409881591796, 0.110060546875, 0.10996326446533203, 0.11019058990478516, 0.11144703674316406, 0.10995097351074219, 0.10988646697998047, 0.11015679931640625, 0.11033395385742187, 0.11005542755126953, 0.23340646362304687, 0.11011686706542968, 0.11028275299072265, 0.11017420959472657, 0.11006259155273437, 0.1100738525390625, 0.11010867309570313, 0.11038105773925781, 0.10999091339111328, 0.10990284729003906, 0.109949951171875, 0.11034009552001953, 0.10992845153808593, 0.10986495971679687, 0.1099192352294922, 0.1098946533203125, 0.10980556488037109, 0.1097553939819336, 0.11004518127441407, 0.11012300872802734, 0.10977996826171875, 0.10986393737792968, 0.10984038543701172, 0.10991718292236329, 0.1098936309814453, 0.10987519836425781, 0.10983116912841796, 0.1099130859375, 0.11000934600830078, 0.1101690902709961, 0.11019468688964844, 0.10981478118896484, 0.1098608627319336, 0.10990796661376953, 0.11005542755126953, 0.10980352020263671, 0.10990284729003906, 0.10992845153808593, 0.11151667022705078, 0.10988748931884766, 0.11004108428955078, 0.11066572570800781, 0.11073741149902344, 0.11011788940429687, 0.1100206069946289, 0.10988748931884766, 0.11012198638916015, 0.11024691009521484, 0.1098967056274414, 0.1100052490234375, 0.11016089630126953, 0.11015782165527344, 0.11018956756591797, 0.11015065765380859, 0.110060546875, 0.11035648345947266, 0.1099335708618164, 0.1101629409790039, 0.10992639923095703, 0.11004108428955078, 0.10989158630371093, 0.11015372467041015, 0.11010047912597656, 0.23385498046875, 0.10988851165771485, 0.11034111785888671, 0.10995097351074219, 0.10985574340820313, 0.11010662078857422, 0.11007078552246094, 0.10991820526123047, 0.10985984039306641, 0.10997248077392578, 0.10986393737792968, 0.11004723358154297, 0.11019468688964844, 0.1100052490234375, 0.11016397094726563, 0.1102387237548828, 0.11004518127441407, 0.11020902252197265, 0.1100943374633789, 0.10998989105224609, 0.11055513763427735, 0.11010253143310547, 0.10998169708251954, 0.11044454193115234, 0.11019570922851563, 0.1099653091430664, 0.10991513824462891, 0.11001753234863282, 0.10987007904052734, 0.1103984603881836, 0.10986188507080077, 0.10991104125976563, 0.11026534271240235, 0.11026534271240235, 0.11053056335449218, 0.11001753234863282, 0.11013529968261719, 0.11008716583251953, 0.11022541046142578, 0.10997657775878907, 0.10988236999511719, 0.11003494262695312, 0.11012198638916015, 0.11003187561035156, 
0.1098260498046875, 0.11009945678710938, 0.10990898895263672, 0.11006771087646484, 0.1099161605834961, 0.10987827301025391, 0.1099130859375, 0.11001036834716797, 0.11009843444824219, 0.1100021743774414, 0.11010355377197266, 0.11002572631835937, 0.11081318664550781, 0.11031654357910156, 0.10989875030517578, 0.11011993408203125, 0.10990898895263672, 0.11030118560791016, 0.10994483184814453, 0.23407717895507812, 0.11024076843261718, 0.10993766021728515, 0.11036876678466796, 0.11012403106689453, 0.10995200347900391, 0.11002674865722656, 0.1097349090576172, 0.10983219146728515, 0.10998681640625, 0.10994380950927735, 0.10989158630371093, 0.1099192352294922, 0.10996736145019531, 0.10994892883300782, 0.10987725067138672, 0.1102376937866211, 0.10967346954345703, 0.11009945678710938, 0.11035238647460938, 0.10991820526123047, 0.10983628845214843, 0.10993561553955078, 0.10991513824462891, 0.10991718292236329, 0.11002982330322265, 0.10980352020263671, 0.10987315368652344, 0.10994380950927735, 0.10986803436279297, 0.10989158630371093, 0.10974515533447265, 0.10991513824462891, 0.10988851165771485, 0.11000627136230469, 0.10975743865966797, 0.10976255798339844, 0.10985472106933594, 0.10994483184814453, 0.10986905670166015, 0.11017011260986329, 0.11067801666259766, 0.11012812805175781, 0.1102387237548828, 0.11023974609375, 0.11021209716796875, 0.11020697784423829, 0.11080191802978516, 0.11018956756591797, 0.11028173065185547, 0.11045887756347657, 0.11006873321533203, 0.11001548767089844, 0.11007180786132813, 0.10984960174560547, 0.1098045425415039, 0.10983936309814453, 0.10996018981933593, 0.1098270721435547, 0.1103984603881836, 0.11062681579589843, 0.11046092987060546, 0.1100206069946289, 0.23471104431152343, 0.10992947387695312, 0.10997350311279297, 0.10981990051269531, 0.11024486541748046, 0.11018342590332031, 0.10990796661376953, 0.10981171417236328, 0.11014348602294922, 0.11026739501953126, 0.10997760009765625, 0.10999705505371093, 0.1097492446899414, 0.10981785583496094, 0.10969292449951172, 0.10985574340820313, 0.10983116912841796, 0.11017215728759766, 0.10983116912841796, 0.11038310241699219, 0.11015577697753906, 0.10982911682128907, 0.10989977264404296, 0.10992845153808593, 0.10990694427490234, 0.10996940612792969, 0.109949951171875, 0.10994892883300782, 0.10998681640625, 0.10987007904052734, 0.11038105773925781, 0.11008409881591796, 0.11003289794921875, 0.10989568328857421, 0.1100021743774414, 0.10995814514160156, 0.1100400619506836, 0.11009945678710938, 0.10990284729003906, 0.10982297515869141, 0.10980863952636719, 0.10979840087890624, 0.10979328155517579, 0.10987725067138672, 0.10977484893798828, 0.10974412536621093, 0.10975641632080078, 0.10971238708496094, 0.10985062408447266, 0.10984243011474609, 0.10988339233398438, 0.10988031768798828, 0.10987519836425781, 0.10985062408447266, 0.10984550476074219, 0.10994175720214844, 0.10979737854003906, 0.10983219146728515, 0.10980659484863281, 0.10979942321777343, 0.10972467041015625, 0.10989977264404296, 0.10978406524658203, 0.23415501403808595, 0.11078553771972656, 0.11036672210693359, 0.11091149139404297, 0.11072921752929688, 0.11045990753173827, 0.11007794952392579, 0.10978508758544922, 0.10992537689208984, 0.1098792953491211, 0.10985574340820313, 0.10979122924804688, 0.10985164642333985, 0.1100738525390625, 0.1099653091430664, 0.10994483184814453, 0.11005235290527343, 0.10987315368652344, 0.11015270233154296, 0.10990080261230468, 0.10979532623291016, 0.10991001892089844, 0.10990489959716797, 0.11083776092529297, 0.11055718231201171, 
0.10991104125976563, 0.1105827865600586, 0.11010765075683594, 0.11010969543457032, 0.10981683349609375, 0.11006771087646484, 0.11014144134521485, 0.11008102416992188, 0.10999501037597656, 0.10991104125976563, 0.10976051330566407, 0.10998579406738282, 0.11004518127441407, 0.10994278717041016, 0.10981478118896484, 0.11005644989013671, 0.10996736145019531, 0.10994278717041016, 0.10976051330566407, 0.10995404815673829, 0.10989260864257812, 0.11064320373535157, 0.1107957763671875, 0.11001856231689452, 0.11012710571289062, 0.11023359680175782, 0.11013632202148438, 0.10996428680419922, 0.1100021743774414, 0.11024384307861328, 0.10992435455322265, 0.10974002838134765, 0.11081215667724609, 0.11024076843261718, 0.11022643280029297, 0.1098967056274414, 0.11006361389160156, 0.10993561553955078]",tokens/s,8.944483317128187,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-125m,facebook/opt-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1231.42144,879.230976,0.0,232.783872,169.719808,s,10,0.35891270065307623,0.035891270065307616,0.001166922312453486,0.03606051254272461,0.03654228057861328,0.037586180877685546,0.03842130111694336,"[0.03863008117675781, 0.03461849594116211, 0.03453500747680664, 0.034383518218994144, 0.03608425521850586, 0.03613267135620117, 0.036015392303466794, 0.036310302734375, 0.036166206359863284, 0.03603676986694336]",tokens/s,7132.653693619183,kWh,4.2057203039815654e-07,2.3039066429929688e-07,8.883686426833978e-07,1.5393313373808513e-06,tokens/kWh,166305975.7073354,MB,1231.42144,879.230976,0.0,232.783872,199.793152,s,10,21.796639404296876,2.179663940429687,0.03379298018028798,2.1997751464843747,2.2025270751953125,2.2029048950195316,2.2032071508789066,"[2.15886328125, 2.11872216796875, 2.1151240234375, 2.1993291015625, 2.20328271484375, 2.20231494140625, 2.20022119140625, 2.201608642578125, 2.194730224609375, 2.202443115234375]",tokens/s,28.903538215886858,kWh,2.6054930089725813e-05,1.4278879354016576e-05,5.1935423616310185e-05,9.22692330600526e-05,tokens/kWh,682784.4765870875,,s,629,22.081034248352047,0.035104982906760014,0.004313579122612795,0.03483135986328125,0.03513838195800781,0.035482418823242184,0.06834089904785158,"[0.035579902648925785, 0.0354150390625, 0.035542015075683595, 0.0358737907409668, 0.03517337417602539, 0.035922943115234376, 0.03607551956176758, 0.03511808013916016, 0.03547443389892578, 0.035765247344970705, 0.035937278747558594, 0.03575091171264649, 0.03546214294433594, 0.04169113540649414, 0.03495731353759766, 0.03391795349121094, 0.033565696716308595, 0.03336908721923828, 0.03351347351074219, 0.03344179153442383, 0.03361177444458008, 0.03342745590209961, 0.03343564987182617, 0.03342745590209961, 0.03340185546875, 0.03341926574707031, 0.03338649749755859, 0.03336294555664063, 0.033552383422851564, 0.03352883148193359, 0.03450368118286133, 0.03436032104492188, 0.034111488342285154, 0.03450265502929688, 0.03355136108398438, 0.034164737701416016, 0.03474431991577148, 0.034351104736328124, 0.03521331024169922, 
0.035111934661865234, 0.03381760025024414, 0.034427902221679685, 0.033756160736083986, 0.0334510383605957, 0.0331734733581543, 0.03338854217529297, 0.03346944046020508, 0.03350425720214844, 0.03346636962890625, 0.03356671905517578, 0.03343769454956055, 0.03313971328735352, 0.03326259231567383, 0.03314790344238281, 0.03320729446411133, 0.03347967910766601, 0.03352678298950195, 0.033538047790527346, 0.03310182571411133, 0.03322163009643555, 0.033478656768798826, 0.03340800094604492, 0.06847795104980468, 0.03334348678588867, 0.03356576156616211, 0.03346118545532226, 0.033667072296142575, 0.03376025772094727, 0.033555454254150394, 0.03353497695922852, 0.03349913787841797, 0.03347455978393555, 0.03362713623046875, 0.033650688171386715, 0.03344998550415039, 0.03351244735717773, 0.033532928466796875, 0.033718273162841796, 0.033614849090576174, 0.03364659118652344, 0.033478656768798826, 0.03355136108398438, 0.03344998550415039, 0.034282497406005856, 0.03367833709716797, 0.033516544342041016, 0.03338854217529297, 0.033584129333496096, 0.033539070129394534, 0.033326080322265625, 0.03335987091064453, 0.03366604614257813, 0.03334143829345703, 0.033258495330810545, 0.03334041595458984, 0.03343155288696289, 0.03324620819091797, 0.03349401473999023, 0.03338547134399414, 0.03338137435913086, 0.0335175666809082, 0.03442892837524414, 0.03411558532714844, 0.034249729156494144, 0.03337011337280273, 0.03331174468994141, 0.033413120269775394, 0.033301502227783206, 0.03349606323242187, 0.03334143829345703, 0.03332403182983398, 0.033527809143066405, 0.0335206413269043, 0.03582156753540039, 0.035253246307373046, 0.03405209732055664, 0.03366604614257813, 0.033827838897705076, 0.03394559860229492, 0.03346636962890625, 0.03348582458496094, 0.03372544097900391, 0.03336703872680664, 0.03363942337036133, 0.033410049438476565, 0.06798847961425782, 0.033957889556884766, 0.03349401473999023, 0.03372236633300781, 0.033659934997558594, 0.033262561798095704, 0.033339393615722655, 0.03347148895263672, 0.03481190490722656, 0.03399270248413086, 0.033448993682861326, 0.033379295349121096, 0.03344179153442383, 0.03343155288696289, 0.0330618896484375, 0.03346124649047852, 0.033410049438476565, 0.033430526733398434, 0.033495040893554685, 0.03334656143188477, 0.03343564987182617, 0.033446910858154294, 0.033513534545898435, 0.03330553436279297, 0.03338547134399414, 0.033484798431396484, 0.03334147262573242, 0.03339465713500977, 0.03335782241821289, 0.03341516876220703, 0.033355777740478515, 0.03362713623046875, 0.03338854217529297, 0.033360897064208986, 0.03363020706176758, 0.03340800094604492, 0.03336505508422852, 0.03361990356445312, 0.03336191940307617, 0.035092479705810545, 0.034958335876464845, 0.03446783828735352, 0.033719329833984374, 0.0333260498046875, 0.03360768127441406, 0.03371417617797851, 0.03365785598754883, 0.033653759002685545, 0.03359027099609375, 0.03361177444458008, 0.033465343475341795, 0.033562625885009766, 0.03357183837890625, 0.03344076919555664, 0.0335093765258789, 0.03359231948852539, 0.033463294982910154, 0.03351551818847656, 0.03339059066772461, 0.0335022087097168, 0.03354009628295898, 0.033432575225830076, 0.033476608276367184, 0.06785228729248047, 0.038763519287109374, 0.03499212646484375, 0.03476172637939453, 0.03466035079956055, 0.03442892837524414, 0.03445862579345703, 0.0349194221496582, 0.03481087875366211, 0.03482009506225586, 0.03474227142333984, 0.03470336151123047, 0.03492768096923828, 0.0348221435546875, 0.034755519866943356, 0.03479654312133789, 0.035053569793701174, 0.03477503967285156, 
0.03513651275634765, 0.03483647918701172, 0.03467161560058594, 0.03479244613647461, 0.034738174438476564, 0.03485184097290039, 0.03481292724609375, 0.03513651275634765, 0.03499728012084961, 0.034848735809326174, 0.03482009506225586, 0.03604684829711914, 0.03580108642578125, 0.034900993347167966, 0.03499622344970703, 0.03482009506225586, 0.03486617660522461, 0.034716670989990234, 0.034871295928955076, 0.03480883026123047, 0.03496857452392578, 0.034961406707763674, 0.03484262466430664, 0.03478732681274414, 0.0348671989440918, 0.03487539291381836, 0.03496038436889649, 0.03478121566772461, 0.03482006454467773, 0.03547750473022461, 0.03485184097290039, 0.034953216552734374, 0.034854911804199216, 0.03493478393554687, 0.03474943923950195, 0.03476889419555664, 0.03471974563598633, 0.034825214385986326, 0.035095550537109374, 0.0350300178527832, 0.034802688598632815, 0.034781185150146485, 0.034677761077880856, 0.034490367889404294, 0.034372608184814454, 0.07120281219482422, 0.03474227142333984, 0.03492659378051758, 0.03486515045166016, 0.035027999877929684, 0.035214336395263675, 0.035108863830566404, 0.03492655944824219, 0.03491635131835937, 0.03499212646484375, 0.0349409294128418, 0.03500236892700195, 0.03487641525268555, 0.03486105728149414, 0.03492659378051758, 0.03494911956787109, 0.03502489471435547, 0.034909183502197266, 0.03505152130126953, 0.03499724960327148, 0.03479142379760742, 0.03497983932495117, 0.03498291015625, 0.03495116806030273, 0.03492454528808594, 0.03508633422851563, 0.034934814453125, 0.03494499206542969, 0.034936832427978515, 0.034841598510742186, 0.03491839981079101, 0.035023872375488284, 0.03488665771484375, 0.03486617660522461, 0.03508428955078125, 0.03497881698608398, 0.03496448135375976, 0.03487334442138672, 0.034991104125976565, 0.035092479705810545, 0.03487641525268555, 0.0350013427734375, 0.035125247955322264, 0.03501567840576172, 0.035350528717041016, 0.03521843338012695, 0.03506687927246094, 0.03503104019165039, 0.034985984802246094, 0.03507308959960938, 0.034981822967529295, 0.03523174285888672, 0.0352174072265625, 0.034923519134521484, 0.034361343383789066, 0.03486822509765625, 0.03495731353759766, 0.03501875305175781, 0.034956287384033204, 0.03489484786987305, 0.034835456848144535, 0.035156993865966796, 0.03473408126831055, 0.07153561401367188, 0.034855934143066404, 0.03496345520019531, 0.034987071990966796, 0.035133377075195316, 0.03502592086791992, 0.035043327331542966, 0.035209217071533204, 0.03504844665527344, 0.03502592086791992, 0.034936832427978515, 0.03493478393554687, 0.0358205451965332, 0.035659774780273434, 0.03496038436889649, 0.03518668746948242, 0.03511296081542969, 0.03490304183959961, 0.03482624053955078, 0.03487539291381836, 0.03492761611938477, 0.03479142379760742, 0.035125247955322264, 0.03506585693359375, 0.03505254364013672, 0.034825214385986326, 0.03476995086669922, 0.03494089508056641, 0.03478732681274414, 0.03488256072998047, 0.035244033813476565, 0.035007488250732424, 0.03475046539306641, 0.03497062301635742, 0.0347770881652832, 0.03480166244506836, 0.03479964828491211, 0.03495734405517578, 0.03496441650390625, 0.03494911956787109, 0.034885631561279294, 0.034953216552734374, 0.034909183502197266, 0.03480575942993164, 0.03474943923950195, 0.034948097229003904, 0.03509862518310547, 0.03496857452392578, 0.03502284622192383, 0.0344719352722168, 0.03450271987915039, 0.03489888000488281, 0.034909183502197266, 0.03487846374511719, 0.03488460922241211, 0.03492147064208984, 0.03486515045166016, 0.03475251388549805, 0.03486310577392578, 
0.03487027359008789, 0.03497369766235352, 0.03508230209350586, 0.034826175689697265, 0.07138098907470704, 0.035064830780029296, 0.035089408874511716, 0.03489894485473633, 0.03474537658691406, 0.034952159881591796, 0.034969600677490234, 0.03484985733032227, 0.0348732795715332, 0.03523481750488281, 0.034993152618408206, 0.03489593505859375, 0.03493983840942383, 0.03493580627441406, 0.034854911804199216, 0.03482316970825195, 0.03493273544311523, 0.034797569274902344, 0.03540787124633789, 0.035068992614746095, 0.03575494384765625, 0.03548672103881836, 0.03483955383300781, 0.03533107376098633, 0.03497062301635742, 0.034800640106201174, 0.034786304473876956, 0.03497574234008789, 0.0349306869506836, 0.03490508651733398, 0.034917377471923826, 0.03463065719604492, 0.034928638458251955, 0.03481292724609375, 0.03505254364013672, 0.03483955383300781, 0.03466854476928711, 0.03482112121582031, 0.03525734329223633, 0.03482726287841797, 0.034948097229003904, 0.034776065826416014, 0.03475558471679688, 0.034283519744873044, 0.03471974563598633, 0.034735103607177735, 0.03482931137084961, 0.03508224105834961, 0.034678783416748044, 0.03488870239257812, 0.034802688598632815, 0.03478015899658203, 0.03476070404052734, 0.03488051223754883, 0.034976768493652347, 0.0347586555480957, 0.03504435348510742, 0.034929695129394533, 0.03516617584228516, 0.034799617767333986, 0.034723838806152346, 0.03487334442138672, 0.03482931137084961, 0.07187558746337891, 0.03538739013671875, 0.03526348876953125, 0.03513241577148438, 0.034925567626953126, 0.03487744140625, 0.0354856948852539, 0.035422206878662106, 0.0351723518371582, 0.034976768493652347, 0.03488460922241211, 0.034890750885009765, 0.0349409294128418, 0.03503206253051758, 0.035076095581054685, 0.03489279937744141, 0.035119102478027346, 0.034729984283447264, 0.03479244613647461, 0.034864158630371095, 0.034802654266357425, 0.0348221435546875, 0.034678783416748044, 0.03501055908203125, 0.03484467315673828, 0.0348487663269043, 0.03479859161376953, 0.03482931137084961, 0.03487539291381836, 0.034783233642578126, 0.03495116806030273, 0.03474534225463867, 0.03493580627441406, 0.03485388946533203, 0.03487948989868164, 0.034926624298095704, 0.034881504058837894, 0.03505254364013672, 0.03485081481933594, 0.03487846374511719, 0.03444224166870117, 0.03474431991577148, 0.03492761611938477, 0.034948097229003904, 0.03510067367553711, 0.0347770881652832, 0.03488972854614258, 0.03482112121582031, 0.034977790832519534, 0.034405376434326174, 0.03482422256469726, 0.034968544006347656, 0.03485081481933594, 0.0349409294128418, 0.034840576171875, 0.034907135009765625, 0.035122177124023435, 0.03496755218505859, 0.03485388946533203, 0.035350528717041016, 0.03549593734741211, 0.03502182388305664, 0.03480473709106445, 0.07150796508789062, 0.0350013427734375, 0.03506995010375977, 0.034994174957275394, 0.03483647918701172, 0.03483135986328125, 0.034991104125976565, 0.03488153457641602, 0.03486310577392578, 0.034756607055664065, 0.034781185150146485, 0.03496038436889649, 0.034710529327392575, 0.03470848083496094, 0.034977790832519534, 0.03480780792236328, 0.03479040145874023, 0.034776065826416014, 0.03490304183959961, 0.03465219116210937, 0.0350074577331543, 0.03491123199462891, 0.03498905563354492, 0.03513139343261719, 0.03480883026123047, 0.03476582336425781, 0.03472281646728516, 0.034713600158691404, 0.034830337524414064, 0.03484364700317383, 0.03500646209716797, 0.034683902740478514, 0.03486310577392578, 0.03489894485473633, 0.034664447784423826, 0.03480575942993164, 0.034835456848144535, 
0.03486822509765625, 0.0347770881652832, 0.03489382553100586, 0.03503615951538086, 0.03473715209960938, 0.03480473709106445, 0.034802688598632815, 0.034677761077880856, 0.03475251388549805, 0.03466652679443359, 0.03473097610473633, 0.0347883529663086, 0.035141632080078124, 0.03476070404052734, 0.03492966461181641, 0.03492147064208984, 0.034854911804199216, 0.034776065826416014, 0.03474431991577148, 0.0348037109375, 0.034800640106201174, 0.0348037109375, 0.03454054260253906, 0.03448934555053711, 0.03489791870117188, 0.03465830230712891, 0.07124582672119141, 0.03520102310180664, 0.03487744140625, 0.035156993865966796, 0.03470848083496094, 0.03489996719360351, 0.03482726287841797, 0.03494297790527344, 0.03487744140625, 0.034769920349121096, 0.034885631561279294, 0.03489996719360351, 0.03513756942749023, 0.03485385513305664, 0.03532185745239258, 0.03501875305175781, 0.0368721923828125, 0.03486310577392578, 0.03483443069458008, 0.03476582336425781, 0.03483647918701172, 0.0348221435546875, 0.03477196884155274, 0.034669567108154296, 0.03483647918701172, 0.034947071075439456, 0.0348671989440918, 0.03482316970825195, 0.03477814483642578, 0.03476988983154297, 0.03473612976074219, 0.03470745468139649, 0.03570380783081055, 0.03584921646118164, 0.03489484786987305, 0.03489689636230469, 0.03458969497680664, 0.034830337524414064, 0.03489689636230469, 0.03477811050415039, 0.034705406188964845, 0.03480985641479492, 0.03458662414550781, 0.03588614273071289, 0.03546003341674805, 0.035043327331542966, 0.034993152618408206, 0.03488051223754883, 0.03490611267089844, 0.035004417419433595, 0.03475763320922851, 0.034797569274902344, 0.03473715209960938, 0.03501772689819336, 0.034772991180419925, 0.034885631561279294, 0.03487539291381836, 0.03483443069458008, 0.03482316970825195, 0.0350013427734375, 0.03475763320922851, 0.03494604873657227, 0.0350300178527832]",tokens/s,28.485984529775525,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-4.5B,facebook/xglm-4.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,3047.755776,4521.984,0.0,3875.536896,3575.121408,s,10,3.925874938964844,0.3925874938964844,0.0032003814064900295,0.39131011962890627,0.3974839904785156,0.39779729309082035,0.39804793518066406,"[0.398110595703125, 0.3898521728515625, 0.39046603393554685, 0.39330859375, 0.38967181396484374, 0.38878271484375, 0.39181430053710936, 0.3956484069824219, 0.39741436767578125, 0.3908059387207031]",tokens/s,652.0839404718809,kWh,4.606836531152074e-06,2.5243247530642504e-06,1.9845015875998338e-05,2.697617716021466e-05,tokens/kWh,9489854.640247436,MB,3047.755776,4521.984,0.0,3875.536896,3800.975872,s,10,231.19119335937498,23.119119335937498,0.024652283207228468,23.1104677734375,23.1560455078125,23.16491630859375,23.17201294921875,"[23.104484375, 23.106837890625, 23.11409765625, 23.1257734375, 23.09865234375, 23.091490234375, 23.10330859375, 23.173787109375, 23.15407421875, 
23.1186875]",tokens/s,2.7250172934602097,kWh,0.0002730506428477602,0.00014965489141425045,0.0011581440098478151,0.0015808495441098258,tokens/kWh,39851.989858702975,,s,629,234.351453125,0.3725778269077901,0.04661763996043978,0.3668070373535156,0.3683815368652344,0.36919970703125,0.7562686254882812,"[0.3660308532714844, 0.3664025573730469, 0.36736306762695314, 0.3682928771972656, 0.3671849060058594, 0.366424072265625, 0.36691455078125, 0.36642098999023437, 0.3666820983886719, 0.36670053100585936, 0.3671910400390625, 0.3671593017578125, 0.3675473937988281, 0.367383544921875, 0.36641281127929687, 0.3659335632324219, 0.366244873046875, 0.3652311096191406, 0.3665745849609375, 0.3662633056640625, 0.3673968505859375, 0.36597760009765623, 0.3667660827636719, 0.3654379577636719, 0.3664527282714844, 0.3673671569824219, 0.36636468505859376, 0.3656898498535156, 0.3663196105957031, 0.3655997314453125, 0.36726272583007813, 0.36568780517578126, 0.3668695068359375, 0.36599508666992187, 0.3665059204101562, 0.3655762023925781, 0.36666366577148435, 0.36643328857421875, 0.3669176330566406, 0.3657113647460937, 0.36776141357421877, 0.3660533752441406, 0.36670053100585936, 0.36581787109375, 0.36618853759765624, 0.3657318420410156, 0.36655206298828125, 0.3656663818359375, 0.36818527221679687, 0.36655206298828125, 0.3670487060546875, 0.366587890625, 0.3682109375, 0.3672012939453125, 0.3683788757324219, 0.3662612609863281, 0.36835430908203126, 0.3673917541503906, 0.3714969482421875, 0.36665652465820314, 0.3664425048828125, 0.36596124267578123, 0.7560806274414062, 0.36733746337890627, 0.36763238525390624, 0.3667384338378906, 0.3690987548828125, 0.3665008544921875, 0.3659049072265625, 0.3675392150878906, 0.36642611694335936, 0.36708966064453125, 0.36646810913085937, 0.3664025573730469, 0.3674142761230469, 0.36601959228515624, 0.36743270874023437, 0.36704461669921873, 0.36616806030273436, 0.36657357788085937, 0.3658526611328125, 0.3659263916015625, 0.3657093200683594, 0.3664773254394531, 0.3665899658203125, 0.3660308532714844, 0.36575845336914065, 0.3663247375488281, 0.3659059143066406, 0.36675994873046874, 0.3660185546875, 0.3673917541503906, 0.36654180908203127, 0.36684698486328127, 0.367072265625, 0.3671255187988281, 0.36628070068359375, 0.3660789794921875, 0.36613528442382814, 0.3671644287109375, 0.3661455383300781, 0.36799591064453124, 0.36629299926757813, 0.36816485595703125, 0.36593869018554687, 0.3678883972167969, 0.3660892028808594, 0.3684013977050781, 0.36708865356445314, 0.36752896118164063, 0.36656741333007814, 0.3682099304199219, 0.36648345947265626, 0.36751565551757814, 0.3659735107421875, 0.3680624694824219, 0.3659202575683594, 0.3667906494140625, 0.36607489013671873, 0.3667957763671875, 0.36641485595703127, 0.36750439453125, 0.36619058227539064, 0.36707431030273435, 0.36645068359375, 0.7576248168945312, 0.3653406677246094, 0.3671552124023437, 0.3671224365234375, 0.3667906494140625, 0.3663052673339844, 0.36621209716796876, 0.36776141357421877, 0.3668439025878906, 0.36665139770507815, 0.3658373107910156, 0.36659506225585936, 0.36775115966796873, 0.36711117553710937, 0.36632986450195315, 0.36725247192382815, 0.36715213012695314, 0.36897998046875, 0.3676968994140625, 0.3678535766601562, 0.366065673828125, 0.367072265625, 0.36664523315429687, 0.3668899841308594, 0.366271484375, 0.3664445495605469, 0.3664271240234375, 0.366392333984375, 0.36568576049804685, 0.36705279541015623, 0.36768154907226563, 0.36760678100585936, 0.36624383544921874, 0.3671849060058594, 0.36741018676757814, 0.36703640747070315, 
0.36581375122070314, 0.36656536865234374, 0.3663052673339844, 0.36876492309570313, 0.366455810546875, 0.3671715698242187, 0.36698419189453124, 0.3677542419433594, 0.36644046020507814, 0.36671795654296874, 0.36616293334960937, 0.36698828125, 0.3662899169921875, 0.36686746215820315, 0.3674449768066406, 0.36675994873046874, 0.366519287109375, 0.3665111083984375, 0.36600628662109375, 0.36778289794921876, 0.3663800354003906, 0.36820684814453125, 0.3660175476074219, 0.36745419311523436, 0.3680143432617187, 0.3678023681640625, 0.3672965087890625, 0.76101123046875, 0.36763442993164064, 0.3688806457519531, 0.3682867126464844, 0.3672862854003906, 0.36792831420898436, 0.36681729125976564, 0.36853555297851565, 0.36648446655273437, 0.36719000244140626, 0.3671961669921875, 0.3679938659667969, 0.3675965576171875, 0.3683921813964844, 0.3668418579101563, 0.3667855224609375, 0.36585470581054685, 0.36641998291015626, 0.3668428649902344, 0.3661527099609375, 0.3681187744140625, 0.3663144836425781, 0.367393798828125, 0.36705484008789063, 0.36674969482421876, 0.36767642211914064, 0.3666595764160156, 0.36638516235351565, 0.3659837341308594, 0.3676375122070312, 0.36701901245117186, 0.36602264404296875, 0.36724429321289065, 0.36727194213867187, 0.3677163391113281, 0.36646194458007814, 0.3668899841308594, 0.36752896118164063, 0.3668070373535156, 0.36688385009765623, 0.3675699157714844, 0.36781158447265627, 0.3655679931640625, 0.3665203247070313, 0.3669176330566406, 0.3664250793457031, 0.36553421020507815, 0.3678494567871094, 0.367056884765625, 0.3667271728515625, 0.3659151306152344, 0.36611276245117186, 0.3676180419921875, 0.3684751281738281, 0.3661475830078125, 0.3675002746582031, 0.3667466125488281, 0.36979815673828126, 0.3675893859863281, 0.3668500366210937, 0.36594073486328127, 0.3664721984863281, 0.3660421142578125, 0.7563417358398438, 0.36537957763671874, 0.3666237487792969, 0.3666851806640625, 0.36812799072265623, 0.3660308532714844, 0.3663882141113281, 0.3670702209472656, 0.3671562194824219, 0.3671490478515625, 0.3683932189941406, 0.36664523315429687, 0.3658895263671875, 0.36612710571289064, 0.36652340698242186, 0.3672749938964844, 0.3655301208496094, 0.366635009765625, 0.3657994384765625, 0.36625100708007813, 0.36786483764648437, 0.3671715698242187, 0.36642816162109376, 0.3672166442871094, 0.3667108459472656, 0.36772549438476565, 0.36702822875976565, 0.367720458984375, 0.3670384521484375, 0.3671142272949219, 0.3670169677734375, 0.369112060546875, 0.36612506103515624, 0.3668643798828125, 0.3657052307128906, 0.3660328979492187, 0.3655086059570313, 0.3667589111328125, 0.3669944458007813, 0.36692889404296875, 0.3655577697753906, 0.3665080261230469, 0.36726373291015624, 0.36662066650390623, 0.3658455505371094, 0.36661654663085935, 0.365918212890625, 0.368047119140625, 0.3663114318847656, 0.36607794189453124, 0.36666778564453123, 0.36623870849609375, 0.36541131591796877, 0.36611993408203125, 0.36540826416015626, 0.36666470336914064, 0.3660861511230469, 0.3677306823730469, 0.3666360168457031, 0.36710296630859374, 0.366376953125, 0.3661721496582031, 0.36556698608398436, 0.75503515625, 0.36811160278320315, 0.3666186218261719, 0.3668746337890625, 0.36598681640625, 0.36689202880859373, 0.3665131530761719, 0.36666983032226563, 0.3660205993652344, 0.3664025573730469, 0.3666462707519531, 0.3671009216308594, 0.3655587768554687, 0.3668203430175781, 0.36630117797851564, 0.36666366577148435, 0.36585574340820315, 0.36627865600585935, 0.3661414489746094, 0.36637799072265625, 0.36607998657226565, 0.3658045349121094, 
0.3679395751953125, 0.36621517944335935, 0.36576153564453123, 0.3663124389648437, 0.3655833740234375, 0.3661219787597656, 0.3678760986328125, 0.36634521484375, 0.36614862060546877, 0.3665377197265625, 0.3658229675292969, 0.36720025634765624, 0.3659571228027344, 0.36605746459960936, 0.36572467041015627, 0.36643429565429686, 0.3677470703125, 0.3671142272949219, 0.36663092041015627, 0.36711935424804687, 0.3663636474609375, 0.36681625366210935, 0.3660943298339844, 0.36676812744140624, 0.36628582763671874, 0.3674306640625, 0.3657963562011719, 0.36687359619140625, 0.36616293334960937, 0.3663595581054688, 0.3658577880859375, 0.367494140625, 0.3659980773925781, 0.3674224548339844, 0.3666606140136719, 0.3666483154296875, 0.36678964233398437, 0.367278076171875, 0.3672596435546875, 0.36691558837890625, 0.3659898986816406, 0.7581112060546875, 0.36696063232421877, 0.36686541748046875, 0.3666217041015625, 0.3667189636230469, 0.36658688354492186, 0.36612606811523435, 0.3663329162597656, 0.36610763549804687, 0.3663811340332031, 0.3662243347167969, 0.3669329833984375, 0.3673456726074219, 0.3678023681640625, 0.36764877319335937, 0.3677501525878906, 0.3669678039550781, 0.3668746337890625, 0.3683153991699219, 0.36921240234375, 0.368362548828125, 0.36775827026367186, 0.3663052673339844, 0.3672483825683594, 0.36686746215820315, 0.36620184326171873, 0.36589157104492187, 0.36683877563476563, 0.36737841796875, 0.36710400390625, 0.3661424560546875, 0.36658074951171876, 0.365517822265625, 0.36632781982421875, 0.36605645751953125, 0.366856201171875, 0.36679168701171877, 0.3667793884277344, 0.3662469177246094, 0.36760064697265626, 0.36884786987304685, 0.3667568664550781, 0.3658803100585937, 0.36786996459960936, 0.3664742431640625, 0.3666083984375, 0.36686541748046875, 0.367140869140625, 0.3666790466308594, 0.36664422607421876, 0.365802490234375, 0.36675277709960935, 0.365348876953125, 0.36615576171875, 0.3656755065917969, 0.36651724243164063, 0.3666483154296875, 0.3665623168945312, 0.3659253845214844, 0.36593869018554687, 0.3652372436523437, 0.36640359497070313, 0.3656540222167969, 0.7624765625, 0.36612710571289064, 0.3665633239746094, 0.36761907958984374, 0.36593048095703123, 0.36634112548828124, 0.3662571411132812, 0.3672842102050781, 0.3657646179199219, 0.368606201171875, 0.36768359375, 0.3676794738769531, 0.36858981323242185, 0.3683133544921875, 0.369396728515625, 0.36714599609375, 0.36760577392578125, 0.3668357238769531, 0.3677470703125, 0.3677235107421875, 0.36642098999023437, 0.36621209716796876, 0.3658362731933594, 0.36801739501953123, 0.3667783813476562, 0.3668715515136719, 0.3696394348144531, 0.369375244140625, 0.3691806640625, 0.3698810729980469, 0.37041253662109375, 0.36909466552734377, 0.3689021301269531, 0.3667169189453125, 0.36602264404296875, 0.3657953186035156, 0.3676252136230469, 0.36923904418945314, 0.3685191650390625, 0.3708651428222656, 0.3690188903808594, 0.370050048828125, 0.36955239868164064, 0.366551025390625, 0.3658076171875, 0.3684198303222656, 0.3685857238769531, 0.3687065734863281, 0.36557516479492186, 0.3663943786621094, 0.3689195556640625, 0.3698083801269531, 0.3692380065917969, 0.3681546325683594, 0.3669350280761719, 0.36759039306640623, 0.3672842102050781, 0.3702108154296875, 0.3688243103027344, 0.36716030883789064, 0.36670465087890625, 0.36789248657226564, 0.3689768981933594, 0.7654307861328125, 0.367162353515625, 0.369691650390625, 0.3691734924316406, 0.370017333984375, 0.368702392578125, 0.36607794189453124, 0.36709375, 0.3667589111328125, 0.36747161865234373, 0.3679764404296875, 
0.3669626770019531, 0.3658486022949219, 0.36790267944335936, 0.3670026245117187, 0.36758526611328124, 0.3668479919433594, 0.36762625122070314, 0.36738970947265626, 0.36922470092773435, 0.36768768310546873, 0.36928103637695314, 0.3660902404785156, 0.36697601318359374, 0.36616705322265625, 0.3675197448730469, 0.36707635498046876, 0.3673385009765625, 0.3668746337890625, 0.36747161865234373, 0.36656536865234374, 0.36796722412109373, 0.36610662841796876, 0.3665213317871094, 0.365781005859375, 0.36651007080078124, 0.36767025756835936, 0.3668213806152344, 0.36538470458984373, 0.3673118591308594, 0.365907958984375, 0.3671910400390625, 0.3668971557617188, 0.3673313293457031, 0.3669053344726563, 0.3664783935546875, 0.36691961669921874, 0.36783718872070315, 0.36801126098632814, 0.36738970947265626, 0.36602264404296875, 0.36634521484375, 0.3659909057617188, 0.36675787353515626, 0.3704688720703125, 0.37160037231445314, 0.370060302734375, 0.3701022644042969, 0.36780850219726563, 0.36857342529296877, 0.3668899841308594, 0.36739788818359376, 0.3669186706542969, 0.7624959716796875, 0.366529541015625, 0.367541259765625, 0.3666298828125, 0.367783935546875, 0.36849972534179687, 0.36857550048828125, 0.36766720581054685, 0.3665428466796875, 0.367025146484375, 0.3665684509277344, 0.3679231872558594, 0.3658014831542969, 0.3674449768066406, 0.36611276245117186, 0.367678466796875, 0.3667189636230469, 0.3687383117675781, 0.36729037475585935, 0.3665848388671875, 0.36641177368164063, 0.36767333984375, 0.3674972229003906, 0.36638516235351565, 0.3664025573730469, 0.36736306762695314, 0.3676334228515625, 0.3689072570800781, 0.3667712097167969, 0.3672862854003906, 0.3658874816894531, 0.36724429321289065, 0.3662274475097656, 0.3670732727050781, 0.3663523864746094, 0.3670425720214844, 0.3662489624023437, 0.36671282958984375, 0.3670978698730469, 0.3670241394042969, 0.3659970703125, 0.3673231506347656, 0.3663739013671875, 0.3662264404296875, 0.36703436279296875, 0.36681729125976564, 0.3661414489746094, 0.3668623352050781, 0.366129150390625, 0.367130615234375, 0.36584756469726565, 0.3665489807128906, 0.3668070373535156, 0.3673395080566406, 0.3674347534179688, 0.3669698486328125, 0.36617010498046876, 0.3672535095214844, 0.36757708740234374, 0.3671152648925781, 0.3667189636230469, 0.3670374450683594, 0.3659479064941406]",tokens/s,2.684002986166677,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,x,x,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/x/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a31df-67a8d8633d1fac8f02167bc7;da36568e-8f20-49bf-a759-049af42e7ba7) Repository Not Found for url: https://huggingface.co/x/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: x is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-3b-4e1t,stabilityai/stablelm-3b-4e1t,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.1,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2110.81216,2911.371264,0.0,2264.92416,2140.859392,s,10,2.5174565429687497,0.25174565429687495,0.002151641767107536,0.250954475402832,0.2536562973022461,0.2552590721130371,0.2565412919616699,"[0.25686184692382813, 0.2507518005371094, 0.24988601684570313, 0.2505283203125, 0.2511571502685547, 0.24977813720703124, 0.24948822021484374, 0.2533001251220703, 0.25277372741699217, 0.25293119812011716]",tokens/s,1016.8993809049352,kWh,2.9486055983114377e-06,1.615695148140686e-06,1.3450261437707742e-05,1.801456218415987e-05,tokens/kWh,14210725.599820558,MB,2113.232896,2911.371264,0.0,2264.92416,2246.908928,s,10,147.68545214843752,14.768545214843751,0.008287027024456562,14.7677890625,14.7791720703125,14.7827984375,14.785699531250001,"[14.7716796875, 14.761337890625, 14.760306640625, 14.757224609375, 14.7783662109375, 14.769251953125, 14.7864248046875, 14.7672763671875, 14.7652822265625, 
14.7683017578125]",tokens/s,4.265823009884493,kWh,0.0001741533532096603,9.545023489099372e-05,0.0007804473459020919,0.0010500509340027457,tokens/kWh,59997.08962673545,,s,629,149.7085112609864,0.23801035176627394,0.029876551877239144,0.2342451171875,0.23526665954589845,0.2357182434082031,0.48483614379882817,"[0.23634022521972656, 0.23580979919433595, 0.23378636169433595, 0.2359173126220703, 0.23436288452148438, 0.2340474853515625, 0.23413555908203126, 0.23381607055664064, 0.23380070495605468, 0.23397683715820314, 0.23489945983886718, 0.23450828552246095, 0.23392665100097657, 0.2347868194580078, 0.23529164123535157, 0.2351278076171875, 0.23502951049804688, 0.23457279968261718, 0.23408128356933594, 0.2355589141845703, 0.2354534454345703, 0.23438336181640626, 0.23522406005859375, 0.23521279907226564, 0.23508070373535156, 0.23463629150390625, 0.23408230590820311, 0.2338928680419922, 0.23402598571777344, 0.23431475830078125, 0.2341826629638672, 0.2337505340576172, 0.23392562866210936, 0.23407513427734375, 0.23401370239257813, 0.23463833618164062, 0.2346096649169922, 0.234144775390625, 0.23394610595703125, 0.2363443145751953, 0.2340720672607422, 0.23385702514648438, 0.23410585021972657, 0.233997314453125, 0.23401370239257813, 0.23508172607421876, 0.23504486083984374, 0.23416934204101564, 0.23398399353027344, 0.2338693084716797, 0.23474688720703124, 0.23387954711914063, 0.23373619079589844, 0.23434751892089845, 0.23412428283691405, 0.23393894958496095, 0.2339563446044922, 0.23393997192382812, 0.23378125, 0.23382528686523438, 0.23372799682617187, 0.23389593505859374, 0.4848691101074219, 0.23401983642578125, 0.23492710876464845, 0.23426150512695312, 0.2343546905517578, 0.2339911651611328, 0.2339113006591797, 0.23409356689453126, 0.23416627502441406, 0.23411199951171874, 0.2339788818359375, 0.23402496337890624, 0.23383450317382812, 0.23386317443847657, 0.234029052734375, 0.2345779266357422, 0.23393177795410156, 0.23390617370605468, 0.234102783203125, 0.234682373046875, 0.2339594268798828, 0.23381196594238282, 0.2337935333251953, 0.23399935913085937, 0.23403213500976563, 0.23468646240234375, 0.23564390563964843, 0.2349363250732422, 0.23442739868164061, 0.2346414031982422, 0.23534591674804686, 0.23548109436035156, 0.23447039794921876, 0.23574732971191406, 0.23480320739746094, 0.2339983367919922, 0.23404850769042967, 0.23442739868164061, 0.23391641235351562, 0.2341724090576172, 0.23490765380859374, 0.2338805694580078, 0.23490252685546875, 0.23385600280761717, 0.23409561157226563, 0.2344857635498047, 0.2364610595703125, 0.23387852478027343, 0.23375360107421875, 0.23386521911621094, 0.2337822723388672, 0.2338170928955078, 0.23385804748535155, 0.23377407836914063, 0.23408230590820311, 0.2339430389404297, 0.234102783203125, 0.2338928680419922, 0.2352168884277344, 0.2338314208984375, 0.23406898498535156, 0.23446630859375, 0.23422157287597656, 0.4860119018554688, 0.2341160888671875, 0.2342451171875, 0.2343505859375, 0.2357442626953125, 0.23456153869628907, 0.23385906982421875, 0.2339041290283203, 0.2340095977783203, 0.23387545776367188, 0.23382733154296875, 0.2345359344482422, 0.23387750244140626, 0.23376690673828124, 0.23402598571777344, 0.23404544067382813, 0.2340095977783203, 0.23381503295898437, 0.23389695739746094, 0.23463833618164062, 0.23435673522949219, 0.23512678527832032, 0.2348257293701172, 0.2361292724609375, 0.23491583251953124, 0.23533772277832032, 0.23424716186523437, 0.23495065307617188, 0.233818115234375, 0.2338734130859375, 0.23402496337890624, 0.23422872924804689, 0.2353070068359375, 
0.23398912048339843, 0.23424000549316407, 0.23436390686035155, 0.23477760314941407, 0.23421133422851562, 0.23425331115722656, 0.23424409484863282, 0.23404544067382813, 0.2338621368408203, 0.23387135314941407, 0.2337925109863281, 0.23464857482910156, 0.23441407775878906, 0.2338365478515625, 0.2360289306640625, 0.23435264587402344, 0.23396249389648438, 0.23382424926757814, 0.23385600280761717, 0.23380787658691407, 0.2338191375732422, 0.23494041442871094, 0.23401164245605469, 0.2339215393066406, 0.2339051513671875, 0.23380685424804687, 0.23376588439941406, 0.23408639526367186, 0.23465472412109376, 0.23369830322265625, 0.48475137329101564, 0.23387852478027343, 0.23382937622070313, 0.23382220458984376, 0.23377714538574218, 0.23383244323730468, 0.23383244323730468, 0.23393792724609375, 0.23368499755859376, 0.233818115234375, 0.23384474182128906, 0.23382118225097656, 0.2341908416748047, 0.23382220458984376, 0.2341754913330078, 0.2348564453125, 0.23380685424804687, 0.23420314025878905, 0.23381094360351562, 0.23416831970214844, 0.23417855834960938, 0.2338488311767578, 0.2338140106201172, 0.23392256164550781, 0.23402803039550782, 0.2338140106201172, 0.234967041015625, 0.23461068725585937, 0.2341754913330078, 0.23411097717285156, 0.23385906982421875, 0.23410995483398436, 0.23426661682128908, 0.23414886474609375, 0.23437004089355468, 0.2345041961669922, 0.23398809814453125, 0.23385292053222656, 0.23385906982421875, 0.23410995483398436, 0.2341386260986328, 0.23388978576660155, 0.2344837188720703, 0.23398809814453125, 0.23559475708007813, 0.2338191375732422, 0.2342256622314453, 0.23473458862304689, 0.23459942626953126, 0.23494554138183593, 0.23424000549316407, 0.23441714477539063, 0.23474380493164063, 0.23442842102050782, 0.23444992065429687, 0.2339368896484375, 0.23582514953613282, 0.2339532775878906, 0.23572274780273436, 0.2348922882080078, 0.23533465576171875, 0.23438438415527343, 0.23461785888671874, 0.4860794982910156, 0.23520664978027345, 0.23465676879882813, 0.23474688720703124, 0.23444070434570313, 0.23441510009765626, 0.23433421325683593, 0.23486770629882814, 0.23470387268066406, 0.23463424682617187, 0.23388978576660155, 0.23490663146972657, 0.23415705871582032, 0.23375666809082032, 0.23418675231933594, 0.23416114807128907, 0.23495986938476562, 0.23515545654296874, 0.23488922119140626, 0.2340095977783203, 0.23481138610839844, 0.23467724609375, 0.23522201538085938, 0.23416831970214844, 0.23409767150878907, 0.23391949462890624, 0.2341826629638672, 0.2353428497314453, 0.23429426574707032, 0.23422361755371093, 0.23447756958007812, 0.23421542358398437, 0.2339983367919922, 0.23531520080566407, 0.2346967010498047, 0.2343055419921875, 0.23412838745117187, 0.23422976684570312, 0.23411712646484376, 0.2343055419921875, 0.2340843505859375, 0.234566650390625, 0.23416934204101564, 0.2340843505859375, 0.23419596862792968, 0.23391232299804687, 0.23488613891601562, 0.23576576232910157, 0.23572377014160156, 0.23397683715820314, 0.23519743347167968, 0.23507557678222657, 0.2344837188720703, 0.23419187927246093, 0.23482879638671875, 0.234134521484375, 0.23423487854003905, 0.23815577697753906, 0.2349475860595703, 0.234608642578125, 0.23501414489746095, 0.2343126983642578, 0.23413145446777345, 0.484052978515625, 0.23418675231933594, 0.23383552551269532, 0.233818115234375, 0.2337884216308594, 0.23386009216308593, 0.23376998901367188, 0.23425535583496093, 0.23407923889160157, 0.23388365173339845, 0.23385702514648438, 0.23382528686523438, 0.2338170928955078, 0.23406182861328126, 0.2349547576904297, 0.2348021697998047, 
0.2346516418457031, 0.23388160705566408, 0.23406387329101563, 0.23462911987304688, 0.23546675109863283, 0.2349168701171875, 0.23516160583496093, 0.23408741760253907, 0.23404135131835938, 0.23388876342773438, 0.2348124084472656, 0.23441407775878906, 0.23469465637207032, 0.23460762023925782, 0.23387443542480468, 0.23417446899414063, 0.23448883056640624, 0.23504998779296876, 0.23461068725585937, 0.2344058837890625, 0.2338805694580078, 0.23380274963378905, 0.23606375122070314, 0.23502951049804688, 0.2342696990966797, 0.2343864288330078, 0.23570431518554688, 0.23431884765625, 0.23458610534667967, 0.23478271484375, 0.23399320983886718, 0.23410176086425782, 0.23421235656738282, 0.2348636169433594, 0.23454310607910156, 0.23408026123046874, 0.2343055419921875, 0.23477145385742187, 0.23488204956054687, 0.23403724670410156, 0.23455743408203125, 0.2344806365966797, 0.23533567810058595, 0.23407411193847658, 0.23458099365234375, 0.2344069061279297, 0.2347008056640625, 0.4853893127441406, 0.23457997131347658, 0.23390719604492188, 0.23476838684082033, 0.23429324340820312, 0.23598899841308593, 0.2341908416748047, 0.23444070434570313, 0.23391641235351562, 0.23489126586914064, 0.23460453796386718, 0.2361405487060547, 0.23541043090820313, 0.2344110107421875, 0.23417958068847655, 0.23468031311035156, 0.23411302185058594, 0.23393075561523438, 0.23403724670410156, 0.23467213439941406, 0.23507046508789062, 0.2341273651123047, 0.23511961364746095, 0.23591935729980468, 0.23496397399902344, 0.23582514953613282, 0.23585279846191406, 0.234498046875, 0.23445606994628906, 0.2352015380859375, 0.23556710815429688, 0.23550361633300781, 0.23390719604492188, 0.2345175018310547, 0.23411814880371093, 0.23459738159179688, 0.2349291534423828, 0.23403826904296876, 0.2339665985107422, 0.2354534454345703, 0.23551487731933593, 0.2355968017578125, 0.2346639404296875, 0.23512371826171874, 0.23418060302734375, 0.23528550720214844, 0.2347694091796875, 0.2348185577392578, 0.23459327697753907, 0.2342686767578125, 0.2342451171875, 0.23486463928222656, 0.23477247619628908, 0.23453797912597657, 0.23439974975585937, 0.2347448272705078, 0.2346782684326172, 0.23409767150878907, 0.2341160888671875, 0.2345482177734375, 0.2349998016357422, 0.23420620727539063, 0.23429632568359374, 0.487478271484375, 0.23422157287597656, 0.2341580810546875, 0.2345779266357422, 0.23406285095214843, 0.23428608703613282, 0.23372492980957033, 0.23382118225097656, 0.23385498046875, 0.23446015930175781, 0.23385804748535155, 0.2345359344482422, 0.2342328338623047, 0.23421644592285157, 0.23417446899414063, 0.23571148681640625, 0.23593370056152344, 0.23512165832519533, 0.23528550720214844, 0.23476223754882813, 0.23465983581542968, 0.2345216064453125, 0.2348072967529297, 0.23432908630371094, 0.23471615600585938, 0.23378125, 0.2351810607910156, 0.2338682861328125, 0.23424716186523437, 0.23442431640625, 0.23430758666992188, 0.23453388977050782, 0.2349547576904297, 0.2341376037597656, 0.2346414031982422, 0.2344806365966797, 0.23443865966796876, 0.23413555908203126, 0.23448268127441407, 0.23451443481445314, 0.23403929138183593, 0.23566233825683594, 0.23403622436523439, 0.23466085815429688, 0.23410687255859375, 0.23414579772949218, 0.2337955780029297, 0.23434034729003905, 0.23408741760253907, 0.23395840454101563, 0.23467520141601564, 0.23385292053222656, 0.234287109375, 0.23367372131347655, 0.23419801330566406, 0.23389695739746094, 0.23498240661621095, 0.23478067016601561, 0.23465061950683594, 0.2337198028564453, 0.2345113525390625, 0.23452978515625, 0.23437619018554687, 
0.48710348510742185, 0.23419290161132814, 0.23404953002929688, 0.23400653076171876, 0.23467213439941406, 0.23747993469238282, 0.23400857543945314, 0.2344908752441406, 0.23426559448242187, 0.23419903564453126, 0.23388978576660155, 0.23458099365234375, 0.23397273254394532, 0.23450009155273438, 0.23396044921875, 0.23411712646484376, 0.23380787658691407, 0.23464755249023436, 0.23406387329101563, 0.23385292053222656, 0.23458201599121092, 0.23409971618652345, 0.2341406707763672, 0.23461273193359375, 0.23395840454101563, 0.23406080627441406, 0.2345912322998047, 0.2337884216308594, 0.23409356689453126, 0.23391743469238283, 0.23382424926757814, 0.23414988708496093, 0.2346895294189453, 0.2340044860839844, 0.23400344848632812, 0.23432704162597656, 0.23545549011230468, 0.234819580078125, 0.23408332824707032, 0.23495884704589845, 0.234134521484375, 0.23427583312988282, 0.23382528686523438, 0.23380992126464845, 0.23423078918457033, 0.23484005737304686, 0.23429119873046875, 0.23448678588867186, 0.23548927307128906, 0.23426559448242187, 0.23432908630371094, 0.23404953002929688, 0.23414579772949218, 0.23459942626953126, 0.23440896606445313, 0.23396351623535155, 0.23510426330566406, 0.23443251037597657, 0.23493734741210937, 0.23475814819335938, 0.23529983520507813, 0.23406387329101563, 0.23430758666992188, 0.48767181396484377, 0.23440896606445313, 0.23380992126464845, 0.23463116455078126, 0.23454617309570314, 0.23481651306152343, 0.23389797973632812, 0.23491993713378906, 0.2345707550048828, 0.23445504760742186, 0.23386317443847657, 0.234925048828125, 0.23413349914550782, 0.23387443542480468, 0.23547187805175782, 0.2346229705810547, 0.23446015930175781, 0.2339246063232422, 0.23416831970214844, 0.23383244323730468, 0.23388365173339845, 0.23380685424804687, 0.23409152221679688, 0.23424000549316407, 0.23371160888671874, 0.23394508361816407, 0.23730380249023436, 0.23463526916503907, 0.2349864959716797, 0.2340843505859375, 0.23448678588867186, 0.234819580078125, 0.23532952880859376, 0.23434547424316407, 0.23549746704101562, 0.2339911651611328, 0.2340966339111328, 0.2338170928955078, 0.23386624145507812, 0.2344622039794922, 0.2341201934814453, 0.234745849609375, 0.2340843505859375, 0.23513189697265624, 0.23424307250976562, 0.23459225463867187, 0.23526194763183594, 0.23420109558105467, 0.23379148864746094, 0.23442124938964845, 0.2343987274169922, 0.2351595458984375, 0.23452774047851563, 0.23387852478027343, 0.233818115234375, 0.23384780883789064, 0.23484825134277343, 0.234134521484375, 0.2350243835449219, 0.2341396484375, 0.23421029663085938, 0.23383450317382812, 0.23446015930175781]",tokens/s,4.2014979288884025,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-2-1_6b,stabilityai/stablelm-2-1_6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.1,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1459.101696,2483.552256,0.0,1837.105152,1664.652288,s,10,1.3924120025634767,0.13924120025634767,0.0012999906015036696,0.139004638671875,0.1399875732421875,0.14133694152832033,0.14241643615722657,"[0.14268630981445313, 
0.13829638671875, 0.13825672912597656, 0.13859184265136718, 0.13820169067382812, 0.13813597106933595, 0.13968771362304688, 0.13956375122070314, 0.1395741729736328, 0.13941743469238282]",tokens/s,1838.5362919071044,kWh,1.625099111365401e-06,8.904696633330225e-07,6.785176661013463e-06,9.300745435711887e-06,tokens/kWh,27524675.497196376,MB,1459.101696,2483.552256,0.0,1837.105152,1763.594752,s,10,82.5780146484375,8.25780146484375,0.007490083700840557,8.258056640625,8.26394970703125,8.268834228515626,8.272741845703125,"[8.261927734375, 8.246669921875, 8.2540546875, 8.2559892578125, 8.2628642578125, 8.261189453125, 8.27371875, 8.2601240234375, 8.2488525390625, 8.2526240234375]",tokens/s,7.629149267902877,kWh,9.755284506867285e-05,5.34662001699176e-05,0.00039530770056858775,0.0005463267458071782,tokens/kWh,115315.60642691902,,s,629,83.69452130126959,0.13305965230726477,0.01654754583003512,0.13084364318847655,0.13196041564941405,0.1323978759765625,0.2692352648925781,"[0.13379481506347657, 0.13254144287109376, 0.13190042114257813, 0.1307463684082031, 0.13086207580566406, 0.13051699829101562, 0.13088563537597656, 0.13059686279296875, 0.1305753631591797, 0.1306071014404297, 0.13055078125, 0.13059071350097656, 0.13054771423339845, 0.13065216064453125, 0.13135154724121093, 0.13138841247558594, 0.13123583984375, 0.13066546630859374, 0.13075045776367186, 0.13078834533691405, 0.130555908203125, 0.13037977600097655, 0.13052517700195312, 0.1304883270263672, 0.1306112060546875, 0.13059686279296875, 0.13050572204589844, 0.13065011596679688, 0.13144883728027343, 0.13071359252929687, 0.13058047485351562, 0.13251072692871094, 0.1310064697265625, 0.13063475036621094, 0.1306859588623047, 0.1307494354248047, 0.13061734008789064, 0.130661376953125, 0.13066035461425782, 0.13148672485351562, 0.13165568542480469, 0.13089280700683595, 0.13063372802734374, 0.13172633361816405, 0.13140377807617187, 0.13122969055175782, 0.13104946899414063, 0.13110272216796875, 0.13083135986328126, 0.13193215942382813, 0.13141708374023436, 0.13122560119628907, 0.1308784637451172, 0.13365965270996094, 0.13181951904296876, 0.13121638488769533, 0.13081907653808594, 0.13171200561523438, 0.1312593994140625, 0.13223219299316405, 0.13111807250976562, 0.13094400024414063, 0.2691379699707031, 0.13103199768066406, 0.1306234893798828, 0.13054873657226562, 0.13060812377929687, 0.1306552276611328, 0.13165773010253906, 0.13123992919921876, 0.13063168334960937, 0.13058560180664064, 0.13068389892578125, 0.13071565246582031, 0.1307463684082031, 0.13084466552734375, 0.1307125701904297, 0.13099623107910155, 0.13122764587402344, 0.1306798095703125, 0.1305128936767578, 0.1305200653076172, 0.13082931518554688, 0.13104742431640626, 0.13062757873535155, 0.13068800354003907, 0.13034291076660157, 0.1306429443359375, 0.1306183624267578, 0.13080677795410156, 0.13188607788085938, 0.13072589111328126, 0.13059686279296875, 0.13140786743164062, 0.13105459594726562, 0.13074227905273436, 0.13063987731933593, 0.13232537841796874, 0.1305538635253906, 0.13083340454101564, 0.1308590087890625, 0.13096754455566406, 0.13071974182128906, 0.1309388732910156, 0.13309132385253905, 0.13125836181640624, 0.13162495422363282, 0.1309450225830078, 0.13123890686035156, 0.13122969055175782, 0.13076173400878907, 0.13033779907226561, 0.13071974182128906, 0.1306808319091797, 0.13068389892578125, 0.13058355712890626, 0.1307535400390625, 0.13082521057128907, 0.13138330078125, 0.13090815734863281, 0.1307678680419922, 0.13069209289550782, 0.13084774780273437, 0.13068287658691408, 0.13077197265625, 
0.26904986572265627, 0.13090304565429686, 0.13155123901367188, 0.1310064697265625, 0.13056512451171876, 0.1314580535888672, 0.13161677551269532, 0.13072178649902344, 0.13050572204589844, 0.1319393310546875, 0.1306808319091797, 0.13063168334960937, 0.13043609619140625, 0.13061427307128906, 0.13054566955566407, 0.13078732299804688, 0.130440185546875, 0.1307043914794922, 0.13066957092285156, 0.13071052551269532, 0.1307473907470703, 0.1307606964111328, 0.130587646484375, 0.132389892578125, 0.13151539611816407, 0.13108224487304687, 0.1304524841308594, 0.13092556762695312, 0.1307709503173828, 0.13067878723144533, 0.13066444396972657, 0.13064601135253906, 0.13057638549804687, 0.13061222839355469, 0.13061734008789064, 0.13069517517089843, 0.1307484130859375, 0.13075045776367186, 0.13066752624511718, 0.13076991271972657, 0.13184307861328126, 0.13099417114257814, 0.13157273864746094, 0.1309870147705078, 0.13192396545410157, 0.13073715209960937, 0.1307791290283203, 0.13263565063476562, 0.1323888702392578, 0.13131365966796876, 0.13071359252929687, 0.13122764587402344, 0.13066854858398438, 0.13092250061035157, 0.13190963745117187, 0.1317898254394531, 0.13199052429199218, 0.1315246124267578, 0.13117645263671876, 0.13117439270019532, 0.13063679504394532, 0.1306071014404297, 0.13064601135253906, 0.27082650756835935, 0.1319393310546875, 0.13072076416015624, 0.13070233154296876, 0.13060914611816407, 0.1306982421875, 0.1306941375732422, 0.1307361297607422, 0.13072384643554688, 0.13180825805664062, 0.13077708435058594, 0.13068902587890624, 0.1307166748046875, 0.1306112060546875, 0.13075456237792968, 0.13076480102539062, 0.1316259765625, 0.13118873596191405, 0.13114265441894532, 0.13099110412597656, 0.13076377868652345, 0.13220045471191405, 0.1319833526611328, 0.131051513671875, 0.1304217529296875, 0.1321359405517578, 0.13102284240722656, 0.13118975830078125, 0.13145599365234376, 0.13123890686035156, 0.13081805419921874, 0.13088050842285157, 0.13064396667480468, 0.13075967407226563, 0.1305620422363281, 0.13079756164550782, 0.1306480712890625, 0.13070130920410156, 0.13066546630859374, 0.13078323364257813, 0.13063168334960937, 0.13063372802734374, 0.1307361297607422, 0.13077810668945314, 0.1324031982421875, 0.1307852783203125, 0.13074227905273436, 0.13091635131835938, 0.1307842559814453, 0.13073408508300782, 0.13077606201171876, 0.13099725341796875, 0.1314017333984375, 0.13113139343261718, 0.13084774780273437, 0.1308579864501953, 0.1312112579345703, 0.1308897247314453, 0.13232537841796874, 0.13256089782714844, 0.1308651580810547, 0.13114060974121095, 0.13100236511230468, 0.2692731018066406, 0.13084364318847655, 0.1314017333984375, 0.13194137573242187, 0.1307535400390625, 0.13124607849121095, 0.1310320587158203, 0.13100338745117188, 0.13084979248046874, 0.130735107421875, 0.13073408508300782, 0.13077810668945314, 0.13074021911621095, 0.13047500610351562, 0.13073100280761718, 0.1308590087890625, 0.1306746826171875, 0.1320499267578125, 0.13072793579101563, 0.13061222839355469, 0.13069004821777344, 0.13063475036621094, 0.13072486877441405, 0.1314580535888672, 0.13120716857910156, 0.13121434020996095, 0.13137100219726563, 0.1323335723876953, 0.13257215881347656, 0.13070130920410156, 0.13072280883789061, 0.13117543029785156, 0.1306480712890625, 0.1327298583984375, 0.13261415100097657, 0.13112832641601563, 0.1316864013671875, 0.1313607635498047, 0.13099008178710939, 0.13083135986328126, 0.1309696044921875, 0.13129830932617187, 0.13074227905273436, 0.13066444396972657, 0.13106892395019532, 0.13083340454101564, 
0.13119078063964842, 0.13266432189941407, 0.13248716735839844, 0.13077503967285156, 0.1307914276123047, 0.13080677795410156, 0.13104946899414063, 0.13092556762695312, 0.13065216064453125, 0.1306480712890625, 0.13058969116210936, 0.13318450927734374, 0.1311262664794922, 0.13230592346191405, 0.13067059326171876, 0.13073408508300782, 0.13068185424804687, 0.27066061401367186, 0.1313095703125, 0.13158502197265626, 0.13086822509765625, 0.13064703369140626, 0.1305917510986328, 0.13066957092285156, 0.13082009887695312, 0.13070950317382812, 0.13068800354003907, 0.1308579864501953, 0.13097062683105468, 0.13119488525390624, 0.13108735656738282, 0.1311057891845703, 0.13334323120117186, 0.13187992858886718, 0.13169049072265626, 0.13072793579101563, 0.13088359069824218, 0.13073919677734375, 0.13076582336425782, 0.1306378173828125, 0.13069926452636718, 0.13055999755859374, 0.13076991271972657, 0.1307361297607422, 0.1307115478515625, 0.13066444396972657, 0.1318144073486328, 0.13064601135253906, 0.1307914276123047, 0.13064396667480468, 0.13078221130371093, 0.13085696411132813, 0.13073408508300782, 0.13069004821777344, 0.13040435791015625, 0.13216152954101562, 0.13084774780273437, 0.13076275634765624, 0.1308784637451172, 0.13086822509765625, 0.1307484130859375, 0.13084364318847655, 0.13232025146484375, 0.13216461181640626, 0.1316546630859375, 0.13190963745117187, 0.13114572143554687, 0.13153176879882814, 0.131056640625, 0.13069107055664062, 0.13102284240722656, 0.13223219299316405, 0.13078630065917968, 0.13118258666992189, 0.1313228759765625, 0.13151641845703124, 0.13187481689453126, 0.13146418762207032, 0.13131263732910156, 0.13112115478515626, 0.2720235595703125, 0.13148159790039063, 0.1317724151611328, 0.13106072998046875, 0.13132492065429688, 0.13174169921875, 0.13085285949707032, 0.13085183715820312, 0.1307484130859375, 0.13077503967285156, 0.13059071350097656, 0.13101158142089844, 0.13111500549316407, 0.13174578857421876, 0.1308159942626953, 0.13084466552734375, 0.13157171630859374, 0.13134848022460938, 0.13074432373046874, 0.13075045776367186, 0.13068800354003907, 0.13058969116210936, 0.13147442626953126, 0.13185433959960938, 0.130872314453125, 0.13104537963867188, 0.13076991271972657, 0.13386341857910156, 0.132885498046875, 0.13136895751953126, 0.1308590087890625, 0.13111091613769532, 0.13062757873535155, 0.1306480712890625, 0.13030911254882813, 0.13067263793945313, 0.13060096740722657, 0.13076582336425782, 0.13124710083007812, 0.13097164916992188, 0.13212261962890626, 0.13122047424316408, 0.1317918701171875, 0.13056410217285155, 0.13102079772949218, 0.13193011474609376, 0.13195468139648436, 0.13295513916015625, 0.13061427307128906, 0.13063270568847657, 0.13238067626953126, 0.13169151306152344, 0.13129830932617187, 0.13212364196777343, 0.13201408386230468, 0.1310709686279297, 0.13222093200683593, 0.1328711700439453, 0.13222195434570314, 0.13068287658691408, 0.1308057556152344, 0.13097779846191407, 0.13187481689453126, 0.271072265625, 0.13202432250976562, 0.13072998046875, 0.13133721923828126, 0.1315010528564453, 0.13107609558105468, 0.13116006469726563, 0.1324267578125, 0.1318338623046875, 0.13117543029785156, 0.13102694702148437, 0.13115699768066405, 0.1313720245361328, 0.13114572143554687, 0.13089791870117187, 0.13081497192382813, 0.1320273895263672, 0.13255679321289063, 0.13251686096191406, 0.13236122131347655, 0.13119488525390624, 0.13082009887695312, 0.130629638671875, 0.1312522277832031, 0.13159219360351562, 0.131557373046875, 0.13065113830566405, 0.13070233154296876, 0.13060403442382812, 
0.13083750915527342, 0.13064396667480468, 0.13056614685058593, 0.13110272216796875, 0.13064909362792967, 0.13075967407226563, 0.13072486877441405, 0.13138330078125, 0.13096754455566406, 0.13072076416015624, 0.13083241271972657, 0.13185635375976562, 0.13205708312988282, 0.1308344268798828, 0.1318041534423828, 0.1312798767089844, 0.13098086547851562, 0.13061529541015626, 0.1309071350097656, 0.13069107055664062, 0.13145703125, 0.13063372802734374, 0.13061016845703124, 0.13082623291015624, 0.13071871948242186, 0.13061529541015626, 0.1307125701904297, 0.13055897521972656, 0.13065728759765624, 0.13056410217285155, 0.13088255310058594, 0.13070335388183593, 0.130735107421875, 0.1306234893798828, 0.27049368286132813, 0.1308221435546875, 0.13063679504394532, 0.13080166625976564, 0.1306071014404297, 0.13121945190429687, 0.1331988525390625, 0.1310433349609375, 0.13162701416015626, 0.1306234893798828, 0.13115391540527344, 0.13173452758789062, 0.13072793579101563, 0.1314897918701172, 0.13150413513183593, 0.1313218536376953, 0.1311191101074219, 0.13160960388183593, 0.13073817443847657, 0.1321881561279297, 0.13074227905273436, 0.1307494354248047, 0.13070233154296876, 0.13128807067871093, 0.1311068115234375, 0.1307729949951172, 0.1307146301269531, 0.1308078155517578, 0.13059788513183593, 0.13057638549804687, 0.13050982666015626, 0.13075762939453126, 0.13053439331054686, 0.13056614685058593, 0.13058355712890626, 0.13062144470214843, 0.1315635223388672, 0.13227008056640624, 0.13068698120117186, 0.13062451171875, 0.130693115234375, 0.1305917510986328, 0.13058457946777344, 0.13053439331054686, 0.13104229736328124, 0.13112832641601563, 0.13049139404296875, 0.13087026977539062, 0.13059686279296875, 0.13072793579101563, 0.13054771423339845, 0.13068185424804687, 0.1312030792236328, 0.13103411865234374, 0.13126451110839843, 0.13072998046875, 0.13075456237792968, 0.13058969116210936, 0.13065933227539062, 0.13062144470214843, 0.1305753631591797, 0.13052517700195312, 0.13057331848144532, 0.2703288269042969, 0.13065420532226563, 0.13157785034179686, 0.13085285949707032, 0.130619384765625, 0.13085594177246093, 0.130735107421875, 0.13151129150390625, 0.13070335388183593, 0.13061734008789064, 0.1304698944091797, 0.1307709503173828, 0.1307484130859375, 0.13094297790527343, 0.1308395538330078, 0.130735107421875, 0.13068185424804687, 0.13075660705566405, 0.13218304443359374, 0.1309634552001953, 0.13148159790039063, 0.13058253479003906, 0.1310627899169922, 0.1309020233154297, 0.13047193908691407, 0.13053030395507811, 0.13151846313476562, 0.1308170166015625, 0.13100338745117188, 0.1308968963623047, 0.13076889038085937, 0.1309644775390625, 0.13093785095214844, 0.1307740173339844, 0.13066648864746094, 0.13088665771484376, 0.1311293487548828, 0.13094706726074218, 0.13092250061035157, 0.13099314880371093, 0.13093785095214844, 0.13083135986328126, 0.1308078155517578, 0.13092250061035157, 0.13095321655273437, 0.13083544921875, 0.13072691345214843, 0.13097471618652343, 0.1313228759765625, 0.13215846252441407, 0.13237759399414062, 0.1312788543701172, 0.13164134216308593, 0.1313638458251953, 0.1314334716796875, 0.13113446044921875, 0.1308159942626953, 0.13188607788085938, 0.1307361297607422, 0.1308733367919922, 0.13075558471679688, 0.13074534606933594, 0.1307606964111328]",tokens/s,7.515426221697727,,,,, 
4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-66b,facebook/opt-66b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 162.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-180B,tiiuae/falcon-180B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/tiiuae/falcon-180B/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-6677946c-681457a30d6d6bb827846dfa;724f5956-71ff-4f73-997f-2fa17b30a678) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/tiiuae/falcon-180B/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like tiiuae/falcon-180B is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-1.3B,,cuda,0,42,,,,,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,main,False,False,MB,1562.181632,1957.167104,0.0,1310.72,1163.955712,s,10,1.3245970916748049,0.13245970916748048,0.001266998397567483,0.13200260162353517,0.1341699493408203,0.13443305816650392,0.1346435452270508,"[0.1346961669921875, 0.1341114807128906, 0.1312022705078125, 0.13115029907226564, 0.13112367248535156, 0.1315875244140625, 0.13163551330566406, 0.13236968994140624, 0.13365353393554688, 0.13306694030761718]",tokens/s,1932.6631593031557,kWh,1.5487117942793545e-06,8.486256414471426e-07,6.508118482104666e-06,8.905455917831163e-06,tokens/kWh,28746422.683134936,MB,1562.181632,1959.264256,0.0,1312.817152,1232.77568,s,10,76.92137255859376,7.692137255859374,0.017487914910414803,7.693824951171875,7.702772509765625,7.711682641601563,7.718810747070313,"[7.69504345703125, 7.69082470703125, 7.6926064453125, 7.6866689453125, 7.7205927734375, 7.69639892578125, 7.6467568359375, 7.70079248046875, 7.69208056640625, 7.699607421875]",tokens/s,8.190181467707257,kWh,9.084797721340027e-05,4.979003748630931e-05,0.0003722497123017,0.0005128877270014096,tokens/kWh,122833.9004489902,,s,629,77.98684770965575,0.12398544945891218,0.01575529620495015,0.121744384765625,0.12330782623291016,0.12368689422607422,0.2534359008789062,"[0.12507750701904297, 0.12424604797363281, 0.1234441909790039, 0.12341862487792969, 0.12180786895751954, 0.1224120330810547, 0.12155903625488282, 0.12088832092285157, 0.12079001617431641, 0.12080332946777343, 0.1209518051147461, 0.12134912109375, 0.12099378967285156, 0.12220416259765625, 0.12147408294677735, 0.12154876708984375, 0.12117708587646485, 0.12111360168457032, 0.12086067199707032, 0.12105318450927735, 0.12099174499511718, 0.12216934204101562, 0.12297727966308594, 0.12166143798828125, 0.12153446197509765, 0.12155801391601563, 0.12161023712158203, 0.12131839752197265, 0.12115455627441406, 0.12147609710693359, 0.12320972442626953, 0.12328857421875, 0.12294041442871094, 0.1213460464477539, 0.12115360260009765, 0.12151289367675781, 0.12153343963623046, 0.12244172668457032, 0.12324454498291015, 0.12321791839599609, 0.1221396484375, 0.123504638671875, 0.12190930938720704, 0.12093536376953125, 0.12353228759765625, 0.1231247329711914, 0.12405248260498047, 0.12286566162109375, 0.12216831970214843, 0.1221580810546875, 0.12220928192138672, 0.12256153869628907, 0.12320460510253907, 0.12468940734863282, 0.12308889770507812, 0.1220997085571289, 0.12128972625732422, 0.12146380615234376, 0.12163587188720704, 0.12112380981445313, 0.12155289459228516, 0.12149759674072266, 0.2558975982666016, 0.12277452850341797, 0.12251545715332031, 0.12256665802001954, 0.12305101013183593, 0.12229837036132812, 0.1212968978881836, 0.1239582748413086, 0.12347596740722656, 0.12176076507568359, 0.12155289459228516, 0.12141977691650391, 0.1213685760498047, 0.1208463363647461, 
0.12073062133789063, 0.1210040283203125, 0.12351385498046875, 0.12159487915039062, 0.12144640350341797, 0.12170649719238281, 0.12249292755126953, 0.12232396697998046, 0.12123442840576172, 0.12141567993164062, 0.12308889770507812, 0.12184268951416016, 0.1215098876953125, 0.12254208374023437, 0.12142387390136719, 0.12173619079589844, 0.12151910400390625, 0.12153446197509765, 0.12306739044189453, 0.12236492919921875, 0.12201881408691406, 0.12173824310302735, 0.12155699157714844, 0.12209356689453126, 0.12139315032958985, 0.12139520263671875, 0.12346572875976562, 0.12301107025146485, 0.12293836975097656, 0.12192870330810547, 0.12156313323974609, 0.12141056060791015, 0.12382415771484374, 0.12120060729980468, 0.12172902679443359, 0.12317593383789062, 0.12274995422363282, 0.12246323394775391, 0.12149657440185548, 0.12139110565185547, 0.12291788482666016, 0.12123340606689453, 0.12196150207519531, 0.12387631988525391, 0.12276838684082031, 0.12148223876953125, 0.12265267181396484, 0.1214044189453125, 0.12125389099121094, 0.25443635559082034, 0.12313497924804688, 0.12153446197509765, 0.12169830322265625, 0.12147097778320312, 0.12127334594726563, 0.12249088287353516, 0.1221396484375, 0.12196044921875, 0.12192460632324219, 0.12144742584228516, 0.1228226547241211, 0.12275917053222657, 0.12112697601318359, 0.12176787567138672, 0.1217228775024414, 0.12139424133300782, 0.12269356536865235, 0.1223720932006836, 0.12153343963623046, 0.1215478057861328, 0.12162351989746094, 0.12261682891845703, 0.12271308898925781, 0.1215098876953125, 0.12233932495117188, 0.12170342254638672, 0.121238525390625, 0.12113817596435547, 0.12111769866943359, 0.12136653137207032, 0.12165529632568359, 0.1218897933959961, 0.121744384765625, 0.12428594970703125, 0.12338790130615235, 0.12270489501953125, 0.1215129623413086, 0.12190310668945313, 0.12194918060302734, 0.12152114868164063, 0.12212838745117187, 0.12310630035400391, 0.12293427276611328, 0.12179046630859375, 0.12117298889160157, 0.12185906982421875, 0.12301107025146485, 0.1216522216796875, 0.120953857421875, 0.12247654724121093, 0.12268236541748047, 0.12308684539794922, 0.12277657318115234, 0.12169420623779297, 0.12158668518066407, 0.12113510131835938, 0.1211361312866211, 0.1227540512084961, 0.12328550720214844, 0.12320972442626953, 0.12324147033691406, 0.12298242950439453, 0.25302217102050784, 0.1215467529296875, 0.12238642883300781, 0.12331622314453125, 0.12284416198730469, 0.12259225463867188, 0.1225902099609375, 0.12293222045898437, 0.12212531280517579, 0.12088832092285157, 0.12144127655029296, 0.12321894073486328, 0.12279398345947265, 0.12161740875244141, 0.12199628448486328, 0.12130611419677734, 0.12155699157714844, 0.12218367767333985, 0.12152627563476562, 0.12282061004638672, 0.12332236480712891, 0.12172697448730468, 0.1214730224609375, 0.12131743621826171, 0.12206380462646485, 0.12272434997558594, 0.12205158233642578, 0.12277043151855468, 0.12318822479248047, 0.12324352264404297, 0.12150886535644531, 0.12119551849365234, 0.12129280090332031, 0.12111567687988281, 0.1214658203125, 0.12191129302978515, 0.12111974334716796, 0.1216358413696289, 0.12116377258300781, 0.12117708587646485, 0.12161539459228515, 0.1231072998046875, 0.12339097595214844, 0.12407295989990234, 0.12407295989990234, 0.12135116577148437, 0.12155903625488282, 0.12125183868408203, 0.12111154937744141, 0.1215272979736328, 0.12139315032958985, 0.12157440185546875, 0.12285030364990235, 0.12157644653320313, 0.1217976303100586, 0.12137881469726562, 0.12175360107421875, 0.12123750305175782, 0.12148838043212891, 
0.12119859313964844, 0.12210585784912109, 0.12211199951171875, 0.12212940979003906, 0.2530303955078125, 0.12125593566894531, 0.12254003143310546, 0.1212252197265625, 0.12286156463623046, 0.12248985290527344, 0.12266598510742187, 0.12236185455322265, 0.12159385681152343, 0.1212590103149414, 0.12165631866455077, 0.12115660858154297, 0.12235878753662109, 0.12386713409423829, 0.1217802276611328, 0.12304691314697265, 0.12239974212646484, 0.12245503997802734, 0.12413849639892578, 0.12299366760253906, 0.12299263763427734, 0.12301107025146485, 0.12142489624023438, 0.12230963134765625, 0.12332236480712891, 0.12175052642822265, 0.12143718719482421, 0.12149247741699219, 0.1212938232421875, 0.12226150512695312, 0.12151193237304687, 0.12162457275390624, 0.12326092529296875, 0.12302438354492187, 0.12452352142333985, 0.12339405059814453, 0.12212735748291016, 0.12276131439208984, 0.12299766540527343, 0.1216911392211914, 0.12096409606933593, 0.12123033905029297, 0.12140953826904297, 0.12123238372802735, 0.12380467224121093, 0.12290969848632813, 0.12300806427001953, 0.12272940826416015, 0.12320665740966796, 0.12299878692626953, 0.12275917053222657, 0.12293119812011719, 0.12306432342529297, 0.12317798614501953, 0.12322406768798828, 0.12217958068847656, 0.1228267822265625, 0.12593353271484375, 0.1234872283935547, 0.12296601867675781, 0.12340940856933594, 0.12347187042236328, 0.12346470642089843, 0.25439436340332033, 0.12301414489746093, 0.1228062744140625, 0.1231431655883789, 0.12312268829345703, 0.12464742279052735, 0.12372991943359375, 0.1234708480834961, 0.12335411071777344, 0.12233523559570313, 0.12217958068847656, 0.12264959716796875, 0.12329881286621094, 0.12281549072265625, 0.12341248321533203, 0.123109375, 0.12312268829345703, 0.12331110382080078, 0.12178943634033203, 0.12153343963623046, 0.12128153228759765, 0.12127027130126954, 0.12139008331298828, 0.12159487915039062, 0.12141875457763672, 0.12109516906738281, 0.12095590209960938, 0.12107469177246094, 0.12224205017089844, 0.1224263687133789, 0.12177005004882813, 0.12150163269042968, 0.12146585845947265, 0.12140953826904297, 0.12113005065917969, 0.12179347229003906, 0.12241817474365234, 0.12146585845947265, 0.1236316146850586, 0.12164096069335938, 0.12152934265136718, 0.12139622497558594, 0.12146688079833984, 0.12121600341796875, 0.12141567993164062, 0.12133478546142579, 0.12168498992919922, 0.1214167709350586, 0.12150675201416015, 0.12333977508544922, 0.12334899139404297, 0.12139826965332032, 0.12266291046142579, 0.12162457275390624, 0.1213675537109375, 0.12148838043212891, 0.123219970703125, 0.12320358276367188, 0.12155494689941407, 0.12110848236083985, 0.12138294219970704, 0.12153238677978516, 0.12358963012695312, 0.25359359741210935, 0.12145458984375, 0.12112588500976562, 0.12119039916992187, 0.12124671936035156, 0.12149964904785156, 0.12119961547851563, 0.12119245147705078, 0.12146688079833984, 0.12120269012451172, 0.12097740936279297, 0.1208616943359375, 0.12124671936035156, 0.12146176147460938, 0.12102349090576171, 0.1214505615234375, 0.12145657348632813, 0.12126924896240235, 0.12115869140625, 0.12115657806396485, 0.12137574768066406, 0.12156928253173828, 0.12129894256591797, 0.1242234878540039, 0.12195740509033202, 0.12128765106201173, 0.12127436828613282, 0.12119142150878906, 0.12085968017578125, 0.12119548797607421, 0.12113203430175781, 0.12153139495849609, 0.12129587554931641, 0.12157234954833984, 0.12127641296386718, 0.12110438537597656, 0.12128870391845703, 0.12145970916748047, 0.12127846527099609, 0.12160921478271484, 
0.12154879760742188, 0.1213306884765625, 0.12123238372802735, 0.12116070556640625, 0.12110643005371094, 0.12110031890869141, 0.12115042877197266, 0.12125593566894531, 0.12130406188964844, 0.12108185577392579, 0.12092108917236329, 0.12128562927246093, 0.12140748596191406, 0.12148838043212891, 0.12150784301757812, 0.12131737518310547, 0.12162969970703125, 0.12153958129882812, 0.122281982421875, 0.12196249389648438, 0.12154879760742188, 0.1211883544921875, 0.12114022064208985, 0.25422848510742185, 0.12144435119628906, 0.12121600341796875, 0.12331622314453125, 0.12173926544189453, 0.12114329528808594, 0.1231800308227539, 0.1233438720703125, 0.1232721939086914, 0.12304691314697265, 0.12367155456542969, 0.12309913635253907, 0.12304998779296875, 0.12310630035400391, 0.12295680236816406, 0.12294348907470704, 0.12386918640136718, 0.12152320098876954, 0.12160205078125, 0.121059326171875, 0.12094258880615234, 0.12096717071533203, 0.12145868682861329, 0.12323331451416016, 0.12289020538330078, 0.12314832305908203, 0.1236971206665039, 0.12315955352783203, 0.12311039733886718, 0.12137471771240234, 0.12235366058349609, 0.1222451171875, 0.1222973403930664, 0.12167884826660157, 0.12279296112060546, 0.12148735809326172, 0.12146995544433593, 0.12277760314941406, 0.12156723022460937, 0.1216358413696289, 0.12169728088378906, 0.12312166595458984, 0.12145664215087891, 0.12275507354736329, 0.1232353286743164, 0.12311347198486328, 0.123072509765625, 0.1227171859741211, 0.12111154937744141, 0.12276019287109376, 0.12255232238769531, 0.12235059356689452, 0.12264141082763672, 0.1211514892578125, 0.12123545837402344, 0.12166246032714843, 0.12119347381591797, 0.12130201721191407, 0.12149964904785156, 0.12166246032714843, 0.12141875457763672, 0.12106034851074218, 0.12132147216796875, 0.25745306396484374, 0.12312064361572266, 0.12307865905761718, 0.12312166595458984, 0.12121913909912109, 0.12116063690185547, 0.1212774429321289, 0.12236185455322265, 0.1215498275756836, 0.12132454681396485, 0.12093132781982421, 0.12260454559326171, 0.12201676940917969, 0.12116480255126953, 0.12119449615478516, 0.121206787109375, 0.12122112274169922, 0.12207103729248046, 0.12143001556396485, 0.12134400177001953, 0.12148531341552735, 0.12378009796142578, 0.12321279907226562, 0.12303257751464844, 0.12391117095947266, 0.12308889770507812, 0.12307762908935548, 0.12230758666992188, 0.12212636566162109, 0.12145769500732422, 0.12122207641601562, 0.12131123352050781, 0.12108799743652343, 0.1211156463623047, 0.12251033782958984, 0.12306432342529297, 0.12318310546875, 0.12159795379638672, 0.12140850830078125, 0.12139520263671875, 0.12131839752197265, 0.12117196655273438, 0.12120371246337891, 0.12156723022460937, 0.12148941040039063, 0.12145664215087891, 0.12146892547607421, 0.1215447006225586, 0.12250316619873047, 0.12295782470703125, 0.12217036437988281, 0.12270387268066406, 0.12305919647216797, 0.12331520080566406, 0.12305203247070312, 0.12250521850585938, 0.12303667449951172, 0.12276838684082031, 0.12293427276611328, 0.12293427276611328, 0.12159283447265624, 0.12137062072753907, 0.12141670227050781, 0.2548654022216797, 0.1212416000366211, 0.12151193237304687, 0.12308480072021484, 0.12140338897705077, 0.12150377655029297, 0.12124156951904297, 0.121275390625, 0.12150374603271484, 0.12148838043212891, 0.12146482849121094, 0.12227788543701172, 0.12289740753173828, 0.12292819213867187, 0.12136339569091797, 0.1211822052001953, 0.12195027160644531, 0.12174432373046876, 0.12153241729736328, 0.12099174499511718, 0.12128463745117188, 0.12138390350341798, 
0.121385986328125, 0.12258918762207031, 0.1230161895751953, 0.12225638580322265, 0.12275917053222657, 0.12151602935791016, 0.12140850830078125, 0.12134809875488281, 0.12176076507568359, 0.1217791976928711, 0.12272946929931641, 0.12169420623779297, 0.12219187164306641, 0.12145561981201172, 0.12164096069335938, 0.12145254516601563, 0.12175360107421875, 0.12436787414550782, 0.1233602523803711, 0.12332339477539063, 0.1232701416015625, 0.12218675231933594, 0.1233039321899414, 0.12280217742919922, 0.12283596801757812, 0.1221048355102539, 0.12304793548583984, 0.1227509765625, 0.12293023681640625, 0.12202182769775391, 0.12274483489990234, 0.1233070068359375, 0.12290560150146485, 0.12302130889892578, 0.1229496307373047, 0.12324864196777344, 0.12319129943847656, 0.12199935913085938, 0.12175363159179688, 0.12325577545166015, 0.12313600158691407]",tokens/s,8.06546255519598,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-6.7b,EleutherAI/pythia-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1368.223744,6256.328704,0.0,5609.8816,5292.617728,s,10,5.657606628417969,0.5657606628417968,0.0009498936514595362,0.5654808349609375,0.5660143920898437,0.5672829833984374,0.5682978564453125,"[0.5685515747070312, 0.5654931030273438, 0.565177734375, 0.5654685668945313, 0.5652743530273437, 0.5651702880859375, 0.5656128540039063, 0.5657227172851562, 0.5657324829101562, 0.5654029541015625]",tokens/s,452.48815764977473,kWh,6.682694372203615e-06,3.660675940348786e-06,3.1253065125888854e-05,4.159643543844126e-05,tokens/kWh,6154373.50103846,MB,1368.551424,6256.328704,0.0,5609.8816,5503.949312,s,10,330.81899609375,33.081899609375,0.006904428548072782,33.079876953125,33.091131640625,33.0930072265625,33.0945076953125,"[33.07798046875, 33.078234375, 33.07119921875, 33.0948828125, 33.09071484375, 33.08729296875, 33.08378515625, 33.07924609375, 33.0805078125, 33.07515234375]",tokens/s,1.9043646448327465,kWh,0.0003903993130889204,0.00021397087349180462,0.001808246989806311,0.0024126171763870358,tokens/kWh,26112.721328770578,,s,629,335.3824949340819,0.5331995149985406,0.06720644230645373,0.5250816040039062,0.52554873046875,0.52574228515625,1.0904419140625001,"[0.5248307495117187, 0.5246986083984375, 0.5245040893554688, 0.524674072265625, 0.52523828125, 0.5248563232421875, 0.5253027954101562, 0.5246597290039062, 0.5248173828125, 0.5247999877929688, 0.5255188598632813, 0.5246453247070313, 0.5245347900390624, 0.5246586303710937, 0.525032470703125, 0.5246986083984375, 0.5246525268554687, 0.5253273315429687, 0.5247651977539063, 0.5249915161132812, 0.5252413330078125, 0.5252423706054687, 0.5247785034179687, 0.5247273559570312, 0.5247057495117188, 0.5248440551757813, 0.5247232055664063, 0.5247467651367187, 0.5255465087890625, 0.5256212768554688, 0.5258025512695312, 0.5255167846679687, 0.5248409423828125, 0.5254369506835938, 0.5253928833007813, 0.5247928466796875, 0.5255188598632813, 0.5249791870117188, 0.5249310913085937, 0.5250969848632813, 0.5247672119140625, 0.5248972778320312, 
0.5248655395507813, 0.5252689819335937, 0.5248040771484375, 0.525211669921875, 0.5255167846679687, 0.525169677734375, 0.5251573486328125, 0.524832763671875, 0.5248081665039063, 0.5248256225585938, 0.5252925415039063, 0.5251204833984375, 0.525022216796875, 0.5254482421875, 0.525276123046875, 0.5254348754882813, 0.5250242309570312, 0.5254471435546875, 0.5251328125, 0.5251553344726563, 1.0913822021484374, 0.5247088623046875, 0.52463818359375, 0.5248921508789063, 0.5250447387695313, 0.52459423828125, 0.5247610473632812, 0.524621826171875, 0.5250303955078125, 0.5247098999023437, 0.5248706665039062, 0.52496484375, 0.524769287109375, 0.525318115234375, 0.5248880615234375, 0.5248450317382812, 0.5251092529296875, 0.5249443969726563, 0.5248491821289063, 0.5246842651367187, 0.524643310546875, 0.5247713012695312, 0.5255167846679687, 0.5258618774414062, 0.5249320678710937, 0.5256673583984375, 0.5253673095703125, 0.525570068359375, 0.5251849975585937, 0.5251604614257812, 0.5255874633789063, 0.5250549926757813, 0.5249617309570312, 0.5247528686523437, 0.5250518798828125, 0.525127685546875, 0.5249607543945313, 0.5249474487304687, 0.525033447265625, 0.5248983154296875, 0.5250068359375, 0.52510205078125, 0.5252761840820312, 0.5249771728515625, 0.5250181274414063, 0.5249228515625, 0.5253406982421875, 0.5251481323242188, 0.5250140380859375, 0.5252628784179687, 0.525149169921875, 0.5251195068359376, 0.5251184692382812, 0.5249095458984375, 0.5248972778320312, 0.5250303955078125, 0.525391845703125, 0.524969970703125, 0.5247293701171875, 0.5249556274414062, 0.5251163940429687, 0.5255157470703125, 0.5251348266601562, 1.090335693359375, 0.524874755859375, 0.5245787963867188, 0.524506103515625, 0.5248809204101562, 0.5247160034179688, 0.5246280517578125, 0.5246934204101562, 0.5246658325195312, 0.5245573120117187, 0.5246976928710938, 0.5247764282226562, 0.524717041015625, 0.5248132934570312, 0.52461669921875, 0.52514306640625, 0.525487060546875, 0.5247119140625, 0.524663818359375, 0.524600341796875, 0.524516357421875, 0.524632080078125, 0.5244651489257812, 0.5252577514648438, 0.5246760864257812, 0.524959716796875, 0.5246556396484375, 0.5247354736328125, 0.5250734252929687, 0.5248143310546876, 0.5247682495117187, 0.5251287231445313, 0.5246996459960938, 0.5248102416992187, 0.5250416870117187, 0.5254154052734376, 0.5251204833984375, 0.524943359375, 0.5247672119140625, 0.5249054565429687, 0.5247723388671875, 0.5249924926757813, 0.5250969848632813, 0.5248839721679688, 0.524780517578125, 0.5248870239257812, 0.5249146728515625, 0.525117431640625, 0.5246310424804688, 0.5249474487304687, 0.5247897338867188, 0.52493310546875, 0.5250109252929688, 0.5253570556640625, 0.5257471923828125, 0.5256693725585937, 0.52505908203125, 0.5255249633789062, 0.5257697143554687, 0.5254031372070312, 0.5251993408203125, 0.5257103271484375, 0.5251307373046875, 1.090680908203125, 0.525180908203125, 0.524788818359375, 0.5252505493164062, 0.5254584350585938, 0.5254379272460937, 0.52552294921875, 0.5255587768554687, 0.525601806640625, 0.5258516235351562, 0.5258157958984375, 0.5254614868164063, 0.5251604614257812, 0.5255485229492187, 0.5253007202148438, 0.5253621826171875, 0.5256417236328125, 0.525749267578125, 0.5251840209960937, 0.5252259521484375, 0.5253324584960938, 0.5253058471679688, 0.52491162109375, 0.5250846557617187, 0.525254638671875, 0.525391845703125, 0.52520654296875, 0.5254430541992188, 0.5258055419921875, 0.525138916015625, 0.5251840209960937, 0.5249403076171875, 0.525707275390625, 0.5251296997070313, 0.52486962890625, 0.5257564086914063, 
0.5252894897460938, 0.5254993896484375, 0.5254983520507812, 0.5253345336914063, 0.5253857421875, 0.5251768188476562, 0.52502734375, 0.5253447875976562, 0.5251942138671875, 0.52514306640625, 0.5253836669921875, 0.5265366821289063, 0.5247119140625, 0.5249392700195312, 0.5253396606445313, 0.5246771240234375, 0.5253150634765625, 0.5247682495117187, 0.5253621826171875, 0.5255966796875, 0.5253765258789063, 0.5253641967773437, 0.5249669189453126, 0.5252526245117187, 0.5254635009765625, 0.5250498657226562, 0.5251553344726563, 1.090404296875, 0.524969970703125, 0.5259376831054687, 0.5252003784179687, 0.5258106689453125, 0.5252955932617187, 0.5250447387695313, 0.525154296875, 0.5248737182617188, 0.5253058471679688, 0.5248440551757813, 0.524874755859375, 0.5246546020507813, 0.5248102416992187, 0.5245665283203125, 0.5248348388671875, 0.5247498168945313, 0.5249761352539063, 0.5246576538085937, 0.5247181396484375, 0.5246954956054688, 0.5256632080078125, 0.5248604125976563, 0.524747802734375, 0.5248604125976563, 0.5250211791992188, 0.5254573974609374, 0.5250242309570312, 0.52548095703125, 0.5251154174804687, 0.5251461181640625, 0.5252608032226562, 0.52522607421875, 0.5251276245117188, 0.5253990478515626, 0.5256478881835938, 0.525464599609375, 0.5254779052734375, 0.5252710571289062, 0.5255720825195312, 0.5254932250976563, 0.52564892578125, 0.5251912231445313, 0.5251798095703125, 0.5252771606445312, 0.5259796752929687, 0.5252730712890625, 0.5259386596679687, 0.5251870727539063, 0.525286376953125, 0.52527001953125, 0.5250816040039062, 0.525275146484375, 0.52520654296875, 0.5252454833984375, 0.5256109619140625, 0.5253765258789063, 0.52531201171875, 0.5252474975585938, 0.5265930786132812, 0.525812744140625, 0.5261884765625, 0.5251481323242188, 1.09045654296875, 0.5245020141601563, 0.5246781616210937, 0.5249658813476562, 0.5249915161132812, 0.5245040893554688, 0.52481640625, 0.5248297119140625, 0.5247836303710938, 0.524885986328125, 0.5248921508789063, 0.52484814453125, 0.5248286743164062, 0.524632080078125, 0.5257512817382812, 0.524990478515625, 0.5250908203125, 0.5249392700195312, 0.5247129516601563, 0.5252608032226562, 0.5249863891601563, 0.5254144287109375, 0.524600341796875, 0.5249567260742187, 0.5247467041015625, 0.5249075317382812, 0.5252260131835937, 0.5248870239257812, 0.5250662231445312, 0.5250048828125, 0.5253692626953125, 0.5249249267578125, 0.524853271484375, 0.5260421142578126, 0.5258383178710937, 0.5250867309570313, 0.5253775634765625, 0.524843017578125, 0.525390869140625, 0.5253294067382812, 0.5256530151367188, 0.5254686889648438, 0.525365234375, 0.525201416015625, 0.5253119506835937, 0.5253365478515625, 0.5253560180664063, 0.5255802612304687, 0.5252782592773437, 0.5253252563476563, 0.5253990478515626, 0.52508056640625, 0.5262468872070313, 0.5258137817382813, 0.525549560546875, 0.5252843627929688, 0.5254717407226562, 0.525453369140625, 0.5256119384765625, 0.5255628662109375, 0.5259458618164062, 0.5253795776367187, 0.52560693359375, 1.0914058837890626, 0.5248624877929687, 0.5247764282226562, 0.5250928344726562, 0.5249915161132812, 0.5249924926757813, 0.524921875, 0.5255986938476562, 0.525233154296875, 0.5249976196289062, 0.525106201171875, 0.525254638671875, 0.5254993896484375, 0.52495361328125, 0.5254133911132812, 0.5256365966796875, 0.5252393188476563, 0.5251287231445313, 0.5250734252929687, 0.5250263061523438, 0.5252188110351562, 0.5251512451171875, 0.5254318237304687, 0.5252034301757813, 0.5248993530273437, 0.5250560302734375, 0.5257349243164062, 0.5247313842773438, 0.5247518920898437, 
0.5246392211914063, 0.5248256225585938, 0.5250816040039062, 0.5247979736328126, 0.5251195068359376, 0.5250089111328125, 0.525053955078125, 0.5249034423828125, 0.5249392700195312, 0.525000732421875, 0.5251604614257812, 0.5249515380859375, 0.5250344848632813, 0.5250201416015625, 0.524843017578125, 0.5250570068359375, 0.525391845703125, 0.5252781982421875, 0.5252321166992188, 0.5249515380859375, 0.5251287231445313, 0.5253140258789063, 0.524959716796875, 0.5254113159179687, 0.5252976684570313, 0.5251635131835938, 0.525433837890625, 0.5251604614257812, 0.5252894897460938, 0.5252474975585938, 0.5253939208984375, 0.5253867797851562, 0.5253079223632813, 0.525212646484375, 1.0918123779296875, 0.5249392700195312, 0.5248511962890625, 0.5250714111328125, 0.525445068359375, 0.5252464599609376, 0.5249423217773438, 0.5251881103515625, 0.5252290649414062, 0.5253109741210937, 0.5252495727539063, 0.524802001953125, 0.52481640625, 0.5247365112304687, 0.5250303955078125, 0.5251235961914062, 0.5250078735351562, 0.5248880615234375, 0.5249197998046875, 0.5250938720703126, 0.525497314453125, 0.5250078735351562, 0.5247528686523437, 0.5247733764648438, 0.524821533203125, 0.524938232421875, 0.5251942138671875, 0.5249832763671874, 0.5250949096679688, 0.5249238891601562, 0.524906494140625, 0.5249658813476562, 0.5247979736328126, 0.5248522338867188, 0.52481640625, 0.52491162109375, 0.5247928466796875, 0.5249320678710937, 0.5248573608398438, 0.5247938842773437, 0.5249791870117188, 0.52478466796875, 0.5247897338867188, 0.5247600708007812, 0.52487890625, 0.5249884033203125, 0.5252925415039063, 0.5250416870117187, 0.5249464111328125, 0.5251000366210937, 0.5250816040039062, 0.5249525756835938, 0.5252413940429688, 0.5255003662109375, 0.5252474975585938, 0.5254031372070312, 0.5251502075195312, 0.5256693725585937, 0.5256007690429687, 0.5253478393554688, 0.5253816528320312, 0.525201416015625, 0.5253990478515626, 1.0916168212890625, 0.52502734375, 0.5249310913085937, 0.524822509765625, 0.5248081665039063, 0.5246361694335937, 0.5247744140625, 0.5250693359375, 0.5248952026367187, 0.5249024047851563, 0.5249915161132812, 0.5248173828125, 0.525412353515625, 0.5252208862304687, 0.5250160522460937, 0.524969970703125, 0.525085693359375, 0.5248706665039062, 0.5251307373046875, 0.5251522827148437, 0.5256038208007813, 0.5248409423828125, 0.5249832763671874, 0.5250211791992188, 0.524906494140625, 0.524864501953125, 0.52510107421875, 0.5247754516601563, 0.5251942138671875, 0.5250908203125, 0.5250857543945312, 0.5254880981445312, 0.5257154541015625, 0.5253683471679688, 0.5251204833984375, 0.5250826416015625, 0.525117431640625, 0.525169677734375, 0.5254266967773438, 0.525581298828125, 0.5249771728515625, 0.5250396118164062, 0.5251522827148437, 0.5250263061523438, 0.5251163940429687, 0.5250775146484375, 0.5251696166992188, 0.5251348266601562, 0.5248726806640625, 0.5248040771484375, 0.525322265625, 0.5251829833984375, 0.5251051635742188, 0.5248706665039062, 0.5250303955078125, 0.5250109252929688, 0.5251963500976562, 0.5250313720703125, 0.5255403442382812, 0.5248737182617188, 0.5250242309570312, 0.5249843139648438, 0.5250836181640625, 1.0909224853515624, 0.5246965942382813, 0.5248829345703125, 0.5249362182617188, 0.5247160034179688, 0.5250303955078125, 0.5250089111328125, 0.5251287231445313, 0.5249238891601562, 0.5249658813476562, 0.5249515380859375, 0.5246607055664062, 0.52463720703125, 0.525317138671875, 0.5247047729492188, 0.5247109375, 0.5245911254882812, 0.5245593872070312, 0.5245286254882813, 0.5245368041992188, 0.5244630737304687, 
0.5245419311523437, 0.5247344360351562, 0.5246781616210937, 0.5250242309570312, 0.525285400390625, 0.5248081665039063, 0.5248297119140625, 0.5248153686523438, 0.5249310913085937, 0.5255239868164062, 0.5253621826171875, 0.5255106811523438, 0.5247969360351562, 0.52493115234375, 0.5251900634765625, 0.525365234375, 0.52534375, 0.52518603515625, 0.5249095458984375, 0.524705810546875, 0.5250477905273437, 0.5248071899414063, 0.5250426635742188, 0.5253519287109375, 0.52510205078125, 0.5249894409179687, 0.5251450805664063, 0.52478466796875, 0.5255065307617187, 0.5247191162109375, 0.5256325073242187, 0.5251266479492187, 0.5251030883789063, 0.5249208374023437, 0.5255485229492187, 0.525053955078125, 0.5254256591796875, 0.525159423828125, 0.525106201171875, 0.5251512451171875, 0.5250416870117187, 0.5253683471679688]",tokens/s,1.8754705731544732,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-7B,Qwen/Qwen1.5-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2628.173824,8389.132288,0.0,7742.685184,7007.145472,s,10,5.8011931152343745,0.5801193115234377,0.0010375594919882804,0.5799924316406251,0.5812619140624999,0.5815937561035156,0.5818592297363281,"[0.5800941772460938, 0.5819255981445313, 0.5788403930664062, 0.5789609375, 0.5798906860351563, 0.5787822265625, 0.57971533203125, 0.5806090698242188, 0.5811865234375, 0.5811881713867187]",tokens/s,441.28853309110576,kWh,6.83268827420694e-06,3.744032605603327e-06,3.3321369249667614e-05,4.3898090129477875e-05,tokens/kWh,5831688.787483131,MB,2628.173824,8389.132288,0.0,7742.685184,7283.985408,s,10,337.06498046875,33.706498046875,0.006009418787724905,33.706857421875,33.7119703125,33.715088671875,33.717583359375006,"[33.70903515625, 33.70971875, 33.7039609375, 33.70134375, 33.71127734375, 33.70996875, 33.71820703125, 33.69992578125, 33.69686328125, 33.7046796875]",tokens/s,1.869075805869452,kWh,0.0003979287963388143,0.0002180992035304977,0.0019156344121359354,0.0025316624120052473,tokens/kWh,24884.8344476149,,s,629,341.74955133056636,0.5433220211932693,0.06898772267341372,0.5349846801757813,0.5355790161132813,0.5358112670898437,1.1147944775390626,"[0.5351260375976562, 0.535109619140625, 0.534408203125, 0.5347737426757813, 0.5349273681640625, 0.5355181884765625, 0.5343795166015625, 0.5353154296875, 0.5344747314453125, 0.5348731079101563, 0.5341849365234375, 0.5352171630859375, 0.5348259887695312, 0.5350338745117188, 0.5346682739257812, 0.5355919189453126, 0.5350399780273437, 0.535109619140625, 0.53498779296875, 0.5350379638671875, 0.5342330932617188, 0.534898681640625, 0.5348997192382813, 0.535445556640625, 0.5350767822265625, 0.5357158203125, 0.5351649169921875, 0.53486181640625, 0.5343006591796875, 0.5351065673828125, 0.5353646240234375, 0.5352591552734375, 0.53469091796875, 0.5347808227539063, 0.5347430419921875, 0.5353215942382813, 0.5346262817382812, 0.5358970947265626, 0.5353533325195312, 0.535372802734375, 0.5349212646484375, 0.5354249877929688, 0.5347993774414063, 0.5352724609375, 0.5352263793945312, 0.5359462280273437, 
0.5349867553710937, 0.5358069458007813, 0.5356390380859375, 0.5357721557617188, 0.5345842895507813, 0.5350502319335938, 0.5345679321289063, 0.5353901977539063, 0.534408203125, 0.5348423461914062, 0.5349427490234375, 0.5352744750976562, 0.5348515625, 0.5350000610351563, 0.5347225341796875, 0.5350891723632812, 1.117781005859375, 0.5343908081054688, 0.5352744750976562, 0.5350615234375, 0.5358694458007812, 0.5350860595703125, 0.5349007568359375, 0.5344706420898437, 0.5346253051757812, 0.534213623046875, 0.5346007080078125, 0.534765625, 0.5350972900390625, 0.5347195434570312, 0.5356298217773438, 0.5349805908203125, 0.5357711181640625, 0.5348290405273437, 0.5349857177734375, 0.5353421020507813, 0.5351044921875, 0.5350942993164063, 0.5355233154296875, 0.5351280517578125, 0.536111083984375, 0.53477783203125, 0.5354240112304688, 0.5351700439453125, 0.5359093627929687, 0.5350686645507813, 0.5352868041992187, 0.5346743774414062, 0.534993896484375, 0.534455322265625, 0.5347471313476563, 0.5344000244140625, 0.5349601440429688, 0.5342822265625, 0.5353840942382813, 0.5345341186523438, 0.5348598022460938, 0.5351505737304687, 0.5354915771484375, 0.5350184936523438, 0.535066650390625, 0.5345361328125, 0.53806591796875, 0.5347102661132812, 0.535362548828125, 0.5352243041992187, 0.5350154418945312, 0.5345475463867188, 0.5348187255859375, 0.5344368896484375, 0.5351710815429688, 0.5345422973632813, 0.5351874389648438, 0.5350225830078125, 0.5353594970703125, 0.5346467895507813, 0.53593701171875, 0.5349805908203125, 0.535109619140625, 1.11461376953125, 0.5346682739257812, 0.5355612182617188, 0.535363525390625, 0.5350051879882812, 0.5345147094726562, 0.5349580688476563, 0.5345648803710937, 0.5349765014648438, 0.5342883911132813, 0.534782958984375, 0.5348720703125, 0.5348485107421875, 0.5344696044921875, 0.5351680297851562, 0.535014404296875, 0.5347891235351563, 0.5343958740234375, 0.534729736328125, 0.5343323974609375, 0.5347850341796875, 0.534245361328125, 0.5351393432617187, 0.5362525024414062, 0.53484130859375, 0.5343948974609375, 0.53480859375, 0.5345147094726562, 0.535352294921875, 0.5345781860351563, 0.5349120483398437, 0.5347604370117187, 0.5352069091796875, 0.5347593994140625, 0.5351116943359375, 0.5349611206054687, 0.5350625, 0.5345709838867188, 0.535088134765625, 0.5350850830078125, 0.535235595703125, 0.53505126953125, 0.5352868041992187, 0.5351475219726562, 0.5351701049804688, 0.535184326171875, 0.5351956176757813, 0.536447998046875, 0.5354301147460937, 0.53513427734375, 0.5351146850585937, 0.5348946533203125, 0.5352826538085937, 0.5349775390625, 0.5352734985351563, 0.5348341674804687, 0.5351444702148438, 0.5347440185546875, 0.53526220703125, 0.5349683227539063, 0.5353143920898438, 0.5344860229492188, 0.534940673828125, 1.1146219482421875, 0.5341777954101562, 0.5348720703125, 0.5346262817382812, 0.5350717163085937, 0.5346693725585937, 0.5351198120117188, 0.5349754638671875, 0.53484033203125, 0.5343754272460938, 0.5346856689453126, 0.534470703125, 0.5363936767578125, 0.5344901123046875, 0.5354598388671875, 0.5345167236328126, 0.535103515625, 0.5345525512695313, 0.5347041015625, 0.5342689208984375, 0.53492431640625, 0.5343795166015625, 0.5349335327148438, 0.5343723754882812, 0.53469287109375, 0.5351454467773438, 0.5353369750976562, 0.5347051391601563, 0.5347532958984375, 0.5346580200195312, 0.5352058715820313, 0.5351024780273438, 0.5352191772460938, 0.53522021484375, 0.5348075561523438, 0.534297607421875, 0.534724609375, 0.5345833129882812, 0.5349539794921875, 0.5344808959960937, 0.53492529296875, 
0.5351454467773438, 0.5350830078125, 0.5344010009765625, 0.5376593627929688, 0.5356707763671875, 0.5352120361328125, 0.5348014526367187, 0.5355078735351563, 0.5344962768554687, 0.5350451049804688, 0.5349385986328125, 0.5351188354492188, 0.5344829711914062, 0.5351536865234375, 0.5346395874023437, 0.5351792602539063, 0.534635498046875, 0.5352366333007812, 0.534640625, 0.5355222778320312, 0.53492431640625, 0.5352960205078126, 1.1157667236328126, 0.5344829711914062, 0.5347286987304688, 0.53443994140625, 0.5350502319335938, 0.5349959716796875, 0.5357506713867187, 0.5345075073242187, 0.5348935546875, 0.5344307250976562, 0.5347952880859375, 0.5344050903320312, 0.5348792114257812, 0.5342566528320313, 0.5350963745117188, 0.5344285888671875, 0.534782958984375, 0.5359862060546875, 0.5356195678710938, 0.53477783203125, 0.5348853759765625, 0.534413330078125, 0.5349529418945312, 0.5347205200195313, 0.5349427490234375, 0.5349754638671875, 0.5349099731445313, 0.5344389038085937, 0.5352949829101562, 0.5348628540039062, 0.5350799560546875, 0.5344368896484375, 0.5352714233398438, 0.5348720703125, 0.5350604858398438, 0.534603759765625, 0.5353707275390625, 0.5352437744140625, 0.535731201171875, 0.5351393432617187, 0.535103515625, 0.5343733520507813, 0.534950927734375, 0.53507275390625, 0.535462890625, 0.5348690185546875, 0.5352427368164062, 0.5344849853515625, 0.5353912353515625, 0.5345740966796875, 0.5349765014648438, 0.5348229370117188, 0.5356441650390625, 0.5349171142578125, 0.5356072998046875, 0.5357066040039062, 0.5356267700195313, 0.5386843872070313, 0.535677978515625, 0.53519970703125, 0.5360302124023437, 0.5351813354492188, 0.5363681030273437, 1.114861572265625, 0.534814697265625, 0.5358141479492188, 0.5348894653320313, 0.535541748046875, 0.5344706420898437, 0.5349130249023437, 0.5350021362304688, 0.5350850830078125, 0.534813720703125, 0.5355775756835938, 0.5344389038085937, 0.53571484375, 0.5351659545898437, 0.5355847778320313, 0.53517822265625, 0.5354332275390625, 0.53538818359375, 0.5351044921875, 0.5344112548828125, 0.5350000610351563, 0.5343201293945312, 0.5348331298828125, 0.5342853393554687, 0.5347276611328124, 0.5348331298828125, 0.534877197265625, 0.534382568359375, 0.5348782348632812, 0.5349212036132812, 0.5350440673828125, 0.5346990356445313, 0.5352007446289062, 0.5345945434570313, 0.5349151000976563, 0.5347184448242187, 0.5349703979492187, 0.5343866577148437, 0.5375672607421875, 0.5345842895507813, 0.535604248046875, 0.5345730590820312, 0.5350553588867187, 0.5347706909179688, 0.5352007446289062, 0.5347727661132813, 0.535562255859375, 0.5352611694335937, 0.5354332275390625, 0.5348392944335938, 0.5353809814453125, 0.5349007568359375, 0.5352796020507813, 0.5347973022460938, 0.535889892578125, 0.535098388671875, 0.5354475708007812, 0.5351219482421875, 0.5354977416992187, 0.5351229248046875, 0.5353697509765625, 0.5347573852539063, 0.5351884765625, 1.11545751953125, 0.534709228515625, 0.5355346069335938, 0.5345218505859375, 0.53505126953125, 0.5347532958984375, 0.5353421020507813, 0.5347676391601562, 0.535130126953125, 0.53502978515625, 0.5353748779296875, 0.5347379150390625, 0.53549462890625, 0.5350656127929687, 0.53538818359375, 0.5347123413085938, 0.53515673828125, 0.5347584228515625, 0.5352509155273437, 0.5351055297851562, 0.5358919677734375, 0.5351751708984375, 0.535287841796875, 0.5365626831054687, 0.5354188842773437, 0.5353696899414062, 0.5352734985351563, 0.5352540283203125, 0.5353543701171875, 0.5349550170898437, 0.5355130615234375, 0.5348792114257812, 0.535568359375, 0.5348311767578126, 
0.5353768310546875, 0.5346519165039062, 0.53517822265625, 0.5345648803710937, 0.5352017822265625, 0.535309326171875, 0.5357240600585937, 0.5350317993164062, 0.5355181884765625, 0.5348699951171875, 0.5352345581054687, 0.5348505859375, 0.5359411010742188, 0.5351085815429687, 0.5355888671875, 0.5350133666992187, 0.5354229736328125, 0.5347727661132813, 0.5353400268554688, 0.5348945922851562, 0.5355601806640625, 0.5356461791992188, 0.5350768432617188, 0.5353328857421875, 0.5356503295898437, 0.5349284057617187, 0.53524169921875, 0.53501953125, 0.5352315063476563, 1.1161282958984375, 0.5346826171875, 0.53496630859375, 0.5344050903320312, 0.5346508178710937, 0.5344050903320312, 0.5348168334960938, 0.5343477172851563, 0.5350819702148437, 0.5347593994140625, 0.5352212524414063, 0.534572021484375, 0.5355878295898437, 0.53490380859375, 0.5348966674804687, 0.5346734008789062, 0.5364859008789062, 0.53452392578125, 0.5349498901367188, 0.5348843383789063, 0.5350963134765625, 0.5344942016601563, 0.5349078979492188, 0.5345894165039062, 0.5349498901367188, 0.5344747314453125, 0.53500927734375, 0.5347010498046875, 0.535119873046875, 0.5345147094726562, 0.53530419921875, 0.5354342651367188, 0.535141357421875, 0.5348782348632812, 0.5348505859375, 0.5344890747070312, 0.5351444702148438, 0.53507275390625, 0.535056396484375, 0.5347092895507812, 0.5349498291015625, 0.534508544921875, 0.5350236206054687, 0.5345904541015625, 0.535593994140625, 0.5347225341796875, 0.5352581176757812, 0.5349776000976563, 0.5351546020507812, 0.5346416625976562, 0.5357752075195312, 0.5347604370117187, 0.5349846801757813, 0.5344286499023437, 0.5350154418945312, 0.5346324462890625, 0.5349918823242188, 0.5349908447265626, 0.5351976928710938, 0.5346375732421875, 0.5349151000976563, 0.5348065185546875, 0.5351188354492188, 1.1169342041015624, 0.534445068359375, 0.5351669921875, 0.5345443725585938, 0.53528369140625, 0.534782958984375, 0.5349293823242187, 0.5344174194335938, 0.5348372192382812, 0.5343057861328125, 0.53471435546875, 0.534593505859375, 0.5350717163085937, 0.5345064697265625, 0.5349273681640625, 0.5345054931640625, 0.5349867553710937, 0.535751708984375, 0.5350215454101562, 0.5345771484375, 0.535593994140625, 0.534761474609375, 0.5349488525390625, 0.5347625122070313, 0.5349222412109375, 0.5344276733398438, 0.53481982421875, 0.5344440307617188, 0.5348536376953125, 0.5344583740234375, 0.5348925170898438, 0.5345833129882812, 0.5350532836914063, 0.5344010009765625, 0.5347758178710937, 0.5343866577148437, 0.5347593994140625, 0.534382568359375, 0.5348362426757812, 0.53475634765625, 0.5349376831054687, 0.5345422973632813, 0.5351393432617187, 0.53452490234375, 0.5351270141601563, 0.5344603881835938, 0.5351065673828125, 0.5345259399414063, 0.5350799560546875, 0.5351802978515625, 0.5355919189453126, 0.5348382568359376, 0.5353246459960938, 0.5350021362304688, 0.535816162109375, 0.5346324462890625, 0.5352212524414063, 0.5346836547851562, 0.5353799438476563, 0.5346682739257812, 0.5351454467773438, 0.5346416625976562, 0.53532568359375, 1.116949462890625, 0.5343743896484375, 0.5350113525390625, 0.5348218994140626, 0.5349816284179687, 0.5344235229492188, 0.5348731079101563, 0.534445068359375, 0.534877197265625, 0.53441943359375, 0.5347359008789062, 0.5345535888671875, 0.5347891235351563, 0.5345515747070313, 0.5351004028320312, 0.53458740234375, 0.5349119873046875, 0.5347153930664063, 0.5349099731445313, 0.5344542846679687, 0.5348812866210938, 0.5343672485351563, 0.5352898559570313, 0.5349918823242188, 0.5351209106445313, 0.5350277099609375, 
0.5352345581054687, 0.5350748291015625, 0.5352857666015625, 0.5347102661132812, 0.5349519653320313, 0.5349539794921875, 0.5350645751953125, 0.534740966796875, 0.5353809814453125, 0.5346959228515625, 0.5358069458007813, 0.53460888671875, 0.5353052368164063, 0.5350338745117188, 0.5352212524414063, 0.53507275390625, 0.5351802978515625, 0.5347666015625, 0.5349642333984375, 0.53477783203125, 0.535235595703125, 0.5350543212890625, 0.5352120361328125, 0.5350082397460938, 0.5354352416992187, 0.5352120361328125, 0.5354669799804688, 0.535140380859375, 0.535362548828125, 0.534782958984375, 0.5354803466796875, 0.5349232788085938, 0.5352908935546875, 0.535625732421875, 0.5352755126953125, 0.53500732421875, 0.5353860473632812]",tokens/s,1.8405291171591998,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.4b,EleutherAI/pythia-1.4b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.0,,,,,,MB,1591.713792,2250.768384,0.0,1604.32128,1463.693312,s,10,1.2648313903808592,0.12648313903808595,0.0013989604614154947,0.1262478713989258,0.1271006202697754,0.1287280216217041,0.13002994270324708,"[0.1303554229736328, 0.12560326385498047, 0.12551904296875, 0.12673897552490235, 0.12573398590087892, 0.12508892822265624, 0.12599820709228515, 0.1264975357055664, 0.1265692138671875, 0.12672681427001953]",tokens/s,2023.9851884361806,kWh,1.4855278241965507e-06,8.139949754399822e-07,6.45409544104969e-06,8.753618240686224e-06,tokens/kWh,29245049.64245863,MB,1591.713792,2250.768384,0.0,1604.32128,1560.975872,s,10,72.9124365234375,7.291243652343749,0.003929707108860259,7.2908349609375005,7.297245703125,7.2972966796875,7.2973374609375,"[7.297234375, 7.2856533203125, 7.29734765625, 7.29276513671875, 7.29289794921875, 7.29063818359375, 7.29089404296875, 7.29077587890625, 7.28937060546875, 7.284859375]",tokens/s,8.640501264794358,kWh,8.609067341933647e-05,4.718392125277432e-05,0.00037183251274354733,0.0005051071074156582,tokens/kWh,124726.02162011672,,s,629,73.93082885742194,0.11753708880353239,0.015022143769719637,0.11565261077880859,0.11614679260253907,0.11651727142333984,0.24159293273925783,"[0.11554611206054688, 0.11542835235595703, 0.11560959625244141, 0.11652607727050782, 0.11558604431152343, 0.1156341781616211, 0.11573760223388672, 0.1157570571899414, 0.11565670776367187, 0.11562393951416015, 0.11558502197265624, 0.11574476623535156, 0.1157580795288086, 0.11542835235595703, 0.11543961334228516, 0.11608268737792969, 0.1160478744506836, 0.11690393829345704, 0.11582669067382813, 0.11567513275146485, 0.1154303970336914, 0.11542835235595703, 0.1157232666015625, 0.1157694091796875, 0.11561158752441406, 0.11565261077880859, 0.11558297729492187, 0.11557170867919922, 0.11555225372314454, 0.11560857391357422, 0.11558604431152343, 0.11568946838378906, 0.11568537902832031, 0.11553177642822265, 0.1154703369140625, 0.11592601776123047, 0.11566284942626953, 0.11548159790039063, 0.11579289245605469, 0.11569561767578125, 0.11579801940917969, 0.11614924621582032, 0.11590553283691406, 0.1172838363647461, 
0.11599565124511718, 0.11589427185058594, 0.11579392242431641, 0.11582054138183594, 0.11560550689697266, 0.11576937866210937, 0.11679228973388672, 0.11623014068603515, 0.115884033203125, 0.11555532836914062, 0.11562290954589843, 0.11581132507324218, 0.11705241394042969, 0.11600486755371094, 0.11588607788085938, 0.115736572265625, 0.11612569427490234, 0.11604176330566406, 0.24190666198730468, 0.11594547271728516, 0.11553177642822265, 0.1155819549560547, 0.1155051498413086, 0.11557990264892579, 0.11673600006103516, 0.11557376098632813, 0.11559117126464843, 0.1155072021484375, 0.11576525115966797, 0.11566079711914062, 0.11590656280517578, 0.115525634765625, 0.11559219360351562, 0.1160847396850586, 0.11556658935546875, 0.1156147232055664, 0.11551641845703126, 0.11533209228515626, 0.11555328369140624, 0.11560147094726562, 0.11554502105712891, 0.11557376098632813, 0.1158809585571289, 0.11545088195800782, 0.11558604431152343, 0.11544985961914063, 0.11559321594238281, 0.11556761932373047, 0.11569868469238281, 0.11571097564697265, 0.11561676788330078, 0.11552051544189453, 0.11553997039794922, 0.11532393646240234, 0.11541606140136719, 0.11545494079589844, 0.11565055847167968, 0.11606527709960937, 0.11634278106689454, 0.1155440673828125, 0.11550105285644531, 0.11538432312011719, 0.11557785797119141, 0.11540582275390625, 0.11706163024902344, 0.11553382110595703, 0.11559321594238281, 0.11543244934082031, 0.11545804595947265, 0.11543142700195312, 0.11540480041503906, 0.11530547332763671, 0.11539667510986328, 0.11571295928955078, 0.11585740661621094, 0.11560447692871094, 0.11563622283935547, 0.11560550689697266, 0.11555430603027343, 0.11554303741455078, 0.1156147232055664, 0.24147865295410156, 0.11553382110595703, 0.11642678070068359, 0.11596393585205078, 0.11609900665283203, 0.11560550689697266, 0.11545906829833984, 0.11533824157714843, 0.11538944244384766, 0.11619737243652344, 0.11612876892089843, 0.11601612854003907, 0.1155758056640625, 0.11589433288574219, 0.1156719970703125, 0.11552870178222656, 0.11574681854248046, 0.11581337738037109, 0.11625574493408203, 0.11572531127929687, 0.11590656280517578, 0.11556454467773437, 0.11557075500488281, 0.1164062042236328, 0.11572736358642578, 0.11559935760498047, 0.1159710693359375, 0.11614105224609375, 0.11683942413330078, 0.11663980865478515, 0.11592697906494141, 0.11562598419189453, 0.11584307098388671, 0.11552973175048828, 0.11597516632080078, 0.11550310516357422, 0.11585126495361328, 0.11633869171142579, 0.11576121520996094, 0.11590239715576171, 0.11548057556152344, 0.11562700653076172, 0.11580518341064452, 0.11599974060058593, 0.11608370971679688, 0.11642265319824219, 0.11600179290771484, 0.11641446685791015, 0.11563212585449219, 0.11547647857666016, 0.11545088195800782, 0.11568032073974609, 0.11588703918457031, 0.11573350524902344, 0.1157201919555664, 0.11566182708740234, 0.11576627349853516, 0.11582566070556641, 0.11572736358642578, 0.11544780731201172, 0.11562393951416015, 0.11546828460693359, 0.11547853088378907, 0.2430136260986328, 0.1159004135131836, 0.11564236450195313, 0.11563827514648438, 0.11565567779541015, 0.11559526062011719, 0.11573350524902344, 0.11556658935546875, 0.1157396469116211, 0.1160284194946289, 0.11578982543945313, 0.11594035339355468, 0.11562290954589843, 0.11544985961914063, 0.11558502197265624, 0.11559117126464843, 0.11605811309814452, 0.11588301086425781, 0.1157949447631836, 0.11583795166015624, 0.11589734649658204, 0.11614617919921875, 0.11590348815917968, 0.1155440673828125, 0.1166714859008789, 0.11601510620117188, 
0.11548365020751954, 0.11576012420654297, 0.11538944244384766, 0.11538944244384766, 0.11550617980957031, 0.1161164779663086, 0.11625676727294922, 0.11571814727783203, 0.11565875244140625, 0.1156485137939453, 0.11559117126464843, 0.11533106994628907, 0.11534438323974609, 0.115957763671875, 0.11574066925048829, 0.11546316528320312, 0.11652095794677735, 0.11555532836914062, 0.11549081420898437, 0.11547443389892578, 0.11542630767822265, 0.11534438323974609, 0.11527986907958984, 0.11548365020751954, 0.11580825805664062, 0.1169459228515625, 0.11576729583740235, 0.11597516632080078, 0.11551849365234375, 0.11617174530029296, 0.1161553955078125, 0.11608678436279297, 0.11566079711914062, 0.11565261077880859, 0.11548365020751954, 0.1155758056640625, 0.11557376098632813, 0.2416373748779297, 0.11573760223388672, 0.11561881256103515, 0.11579596710205078, 0.11593830108642578, 0.115810302734375, 0.11552870178222656, 0.11581542205810547, 0.11552665710449218, 0.11582771301269532, 0.1157396469116211, 0.11553791809082031, 0.1157027816772461, 0.11587789154052734, 0.11567513275146485, 0.11574169921875, 0.11559833526611328, 0.11615334320068359, 0.11550822448730469, 0.11546316528320312, 0.11542733001708984, 0.11593523406982421, 0.11576217651367188, 0.11546521759033203, 0.11530751800537109, 0.11534848022460938, 0.11562496185302734, 0.11573248291015625, 0.11568946838378906, 0.11568025970458984, 0.1157570571899414, 0.11558809661865234, 0.11546009826660156, 0.11551436614990235, 0.11556864166259766, 0.11568851470947265, 0.11561977386474609, 0.11564441680908204, 0.11560243225097656, 0.11574374389648437, 0.11693260955810547, 0.11600179290771484, 0.11581951904296875, 0.11551337432861328, 0.11621782684326172, 0.11561574554443359, 0.11616153717041015, 0.11619328308105469, 0.11597926330566406, 0.11552153778076171, 0.11582157135009766, 0.11549798583984375, 0.11565773010253906, 0.11573868560791016, 0.11633657836914063, 0.11591986846923828, 0.11580723571777343, 0.11681075286865235, 0.11580210876464844, 0.11566284942626953, 0.11573554992675782, 0.11571405029296875, 0.11579392242431641, 0.24123802185058593, 0.11577037048339844, 0.11569152069091797, 0.11550924682617188, 0.11548262023925782, 0.11541299438476563, 0.11537100982666015, 0.11548979187011718, 0.11538540649414063, 0.11682399749755859, 0.11572531127929687, 0.11558911895751953, 0.11550003051757812, 0.11544576263427735, 0.11544678497314453, 0.11560755157470703, 0.11546828460693359, 0.11556249237060547, 0.1156485137939453, 0.11555023956298828, 0.11552047729492188, 0.11564236450195313, 0.11559014129638671, 0.11573452758789063, 0.11543859100341797, 0.11551641845703126, 0.11565465545654297, 0.11574988555908203, 0.11569971466064453, 0.11559935760498047, 0.11563724517822266, 0.11556044769287109, 0.11579084777832031, 0.11558399963378906, 0.11560345458984375, 0.11583385467529297, 0.11566182708740234, 0.11573350524902344, 0.11707392120361328, 0.11604377746582031, 0.11578166198730469, 0.11570275115966797, 0.11584614562988281, 0.11643289947509766, 0.11766886138916016, 0.11568742370605468, 0.11568131256103516, 0.1155184326171875, 0.11561779022216796, 0.11559219360351562, 0.1157734375, 0.1157580795288086, 0.11578675079345703, 0.11578163146972656, 0.11558399963378906, 0.1155758056640625, 0.11558399963378906, 0.11569766235351563, 0.11568230438232421, 0.11554412841796875, 0.11554502105712891, 0.11572531127929687, 0.11579392242431641, 0.24222003173828124, 0.11544473266601563, 0.11548876953125, 0.11571814727783203, 0.11562905883789062, 0.1165486068725586, 0.11599871826171874, 
0.11563724517822266, 0.11574374389648437, 0.11579801940917969, 0.11553382110595703, 0.11554611206054688, 0.11684249877929688, 0.11557273864746094, 0.11547853088378907, 0.1154877471923828, 0.11573350524902344, 0.11547853088378907, 0.11548876953125, 0.11556556701660156, 0.1155973129272461, 0.11554815673828125, 0.1156280288696289, 0.1164400634765625, 0.115525634765625, 0.11554815673828125, 0.11559321594238281, 0.11538022613525391, 0.11541510772705078, 0.11557574462890625, 0.11551538848876954, 0.11658854675292969, 0.11569561767578125, 0.11581132507324218, 0.11544371032714844, 0.1155594253540039, 0.11566284942626953, 0.11575603485107422, 0.1155072021484375, 0.11597618865966797, 0.11624857330322266, 0.11573554992675782, 0.11539968109130859, 0.11556352233886719, 0.11553997039794922, 0.1154119644165039, 0.11562086486816406, 0.11566899108886719, 0.11578470611572265, 0.11621580505371094, 0.11563520050048828, 0.11619840240478516, 0.11569459533691406, 0.11568230438232421, 0.11591270446777344, 0.11579801940917969, 0.11572531127929687, 0.11583692932128906, 0.11580006408691407, 0.11551436614990235, 0.11591372680664062, 0.11565055847167968, 0.11566796875, 0.2425927734375, 0.11592601776123047, 0.11652812957763672, 0.1160263671875, 0.1156280288696289, 0.1154703369140625, 0.11540684509277344, 0.11567411041259766, 0.11576422119140625, 0.11549388885498046, 0.11578163146972656, 0.11591372680664062, 0.11623423767089844, 0.11559321594238281, 0.11569459533691406, 0.11551744079589844, 0.11644620513916015, 0.1155645751953125, 0.11573654174804687, 0.11594445037841797, 0.11607039642333984, 0.1157232666015625, 0.11550310516357422, 0.11549900817871094, 0.11566694641113281, 0.11567616271972657, 0.11566902160644531, 0.1161082534790039, 0.11594137573242187, 0.11565158081054687, 0.11552153778076171, 0.11549702453613281, 0.11560646057128907, 0.11540275573730469, 0.11600281524658203, 0.11596390533447265, 0.11555123138427735, 0.11542118072509766, 0.11550617980957031, 0.11543654632568359, 0.11562700653076172, 0.11552051544189453, 0.11569356536865234, 0.11555020904541016, 0.11564236450195313, 0.11563622283935547, 0.11566694641113281, 0.11554611206054688, 0.11565567779541015, 0.11542425537109376, 0.11545600128173829, 0.11553587341308594, 0.11576729583740235, 0.11622911834716797, 0.11651174163818359, 0.11568844604492187, 0.11576831817626954, 0.11554713439941407, 0.11568025970458984, 0.11566694641113281, 0.11580620574951171, 0.11560345458984375, 0.11570893096923827, 0.24270541381835936, 0.11573554992675782, 0.1154734115600586, 0.115525634765625, 0.11553279876708984, 0.11555430603027343, 0.11576525115966797, 0.11567922973632813, 0.11552665710449218, 0.11537715148925781, 0.1155041275024414, 0.11540991973876953, 0.11538329315185547, 0.11551641845703126, 0.11542937469482421, 0.11544576263427735, 0.11548365020751954, 0.11621273803710938, 0.11560447692871094, 0.11579801940917969, 0.11566796875, 0.11572531127929687, 0.11551750183105469, 0.1155849609375, 0.11561062622070313, 0.11598028564453125, 0.11561676788330078, 0.11556147003173828, 0.11561676788330078, 0.11551436614990235, 0.11571302032470702, 0.11565875244140625, 0.11572223663330078, 0.11552973175048828, 0.11562290954589843, 0.11551747131347656, 0.11566793823242187, 0.11573760223388672, 0.11573760223388672, 0.11582463836669922, 0.1156147232055664, 0.11548880004882812, 0.1162034912109375, 0.11573350524902344, 0.11611344146728515, 0.11572425842285156, 0.11567616271972657, 0.11576217651367188, 0.11560352325439453, 0.11609388732910156, 0.11659980773925781, 0.11586048126220704, 
0.11570381164550782, 0.11573248291015625, 0.11557170867919922, 0.11563827514648438, 0.11605709075927734, 0.1166714859008789, 0.1158123550415039, 0.11569971466064453, 0.11564543914794922, 0.11563629150390625, 0.11554707336425782, 0.24293376159667968, 0.1157570571899414, 0.11543551635742187, 0.11561164855957032, 0.11565161895751953, 0.11541910552978515, 0.11535257720947266, 0.11540790557861329, 0.1153371810913086, 0.11557478332519532, 0.11545394897460938, 0.11548365020751954, 0.11608576202392579, 0.11585740661621094, 0.1155758056640625, 0.11546419525146484, 0.11544882965087891, 0.11567922973632813, 0.11579596710205078, 0.11548467254638672, 0.11567820739746094, 0.11553075408935547, 0.11560447692871094, 0.11557683563232422, 0.11538841247558594, 0.11541709136962891, 0.11647283172607421, 0.11568946838378906, 0.11550924682617188, 0.11575603485107422, 0.11594035339355468, 0.11566387176513672, 0.11554713439941407, 0.11556761932373047, 0.11553997039794922, 0.11574578857421874, 0.1154959716796875, 0.11552867126464844, 0.11542527770996094, 0.11553689575195313, 0.11548262023925782, 0.11609190368652343, 0.11563314819335938, 0.11550105285644531, 0.11568844604492187, 0.1156341781616211, 0.11574681854248046, 0.11630079650878906, 0.11589119720458985, 0.1157027816772461, 0.11557170867919922, 0.11571609497070312, 0.11571916961669922, 0.11559321594238281, 0.1154549789428711, 0.1155389404296875, 0.11564543914794922, 0.11553587341308594, 0.11558604431152343, 0.11541913604736329, 0.11550822448730469, 0.11574272155761718, 0.11551026916503906]",tokens/s,8.507952767756032,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-13b,facebook/opt-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2935.840768,9548.857344,0.0,8902.41024,8265.321472,s,10,10.732376098632812,1.0732376098632814,0.0018882051518454717,1.0727980346679686,1.0744577026367188,1.076272918701172,1.0777250915527343,"[1.078088134765625, 1.0709527587890626, 1.071809814453125, 1.072758056640625, 1.0723739013671876, 1.0717447509765625, 1.0728380126953125, 1.07379296875, 1.0740543212890625, 1.07396337890625]",tokens/s,238.53058972897117,kWh,1.2652489013142056e-05,6.933077765861525e-06,5.8725796980607646e-05,7.831136375961123e-05,tokens/kWh,3269001.939307702,MB,2940.198912,9548.857344,0.0,8902.41024,8556.582912,s,10,636.0469335937499,63.60469335937499,0.008753951962243356,63.60478125,63.61318203125,63.615917187499996,63.6181053125,"[63.59721875, 63.61257421875, 63.61096484375, 63.6103046875, 63.61865234375, 63.5982421875, 63.6112421875, 63.58923828125, 63.5992578125, 63.59923828125]",tokens/s,0.9904929443499021,kWh,0.0007508853809701072,0.00041155162046943583,0.003498196520777186,0.00466063352221673,tokens/kWh,13517.475617785843,,s,629,644.6920160522462,1.0249475612913292,0.12743236447478887,1.0094981079101562,1.01038447265625,1.0106333129882812,2.0813571875,"[1.0103838500976563, 1.011209228515625, 1.0106275634765625, 1.010282470703125, 1.0090455322265626, 1.00902294921875, 1.0092001342773438, 1.0092001342773438, 
1.0087454833984375, 1.0091202392578125, 1.0090209350585937, 1.0097459106445312, 1.009122314453125, 1.0094325561523438, 1.0090762329101564, 1.008990234375, 1.009164306640625, 1.0090936279296876, 1.0088233032226563, 1.009649658203125, 1.0089727783203124, 1.0095328979492189, 1.009112060546875, 1.0095298461914062, 1.0089553833007812, 1.0094817504882811, 1.0095001831054689, 1.0089779052734376, 1.0089512939453125, 1.0092308349609376, 1.0093475952148439, 1.0095062866210938, 1.0100172729492187, 1.0097244262695313, 1.0091735229492187, 1.0095360107421876, 1.0095134887695312, 1.0093209838867188, 1.009239013671875, 1.00967529296875, 1.0096640014648437, 1.00973974609375, 1.0092687377929688, 1.009269775390625, 1.0091714477539062, 1.0091786499023438, 1.0094950561523437, 1.0094622802734374, 1.0091345825195313, 1.009670166015625, 1.0100039672851562, 1.0096732177734375, 1.0092994384765626, 1.0093557739257812, 1.0091253662109374, 1.00938134765625, 1.0091427612304686, 1.0090465087890625, 1.0089072875976564, 1.0098462524414062, 1.010165771484375, 1.0093465576171874, 2.08412060546875, 1.0087465209960937, 1.0093834228515626, 1.0092031860351562, 1.0096558227539063, 1.0095739135742188, 1.0104360961914063, 1.0100264892578126, 1.0097346801757812, 1.0098964233398438, 1.0101534423828125, 1.00982373046875, 1.009934326171875, 1.0099998779296875, 1.0098063354492188, 1.0099056396484376, 1.0100838623046875, 1.0100254516601563, 1.00965478515625, 1.0096107788085937, 1.0098421630859375, 1.0097664184570312, 1.0100264892578126, 1.0098104248046875, 1.0099118041992188, 1.0101432495117189, 1.010134033203125, 1.0106337280273439, 1.0090772705078126, 1.0097838134765624, 1.0102476806640626, 1.0099251098632813, 1.0096466064453125, 1.00933837890625, 1.0092185668945313, 1.0092902221679687, 1.0095257568359375, 1.00948583984375, 1.0100172729492187, 1.010408447265625, 1.0100695190429687, 1.0098995361328125, 1.0105394897460938, 1.00971826171875, 1.0095902709960938, 1.0096414794921875, 1.009565673828125, 1.00931787109375, 1.0096066284179688, 1.0097407836914063, 1.009343505859375, 1.0096947021484375, 1.0099138793945313, 1.009455078125, 1.0092472534179688, 1.0096680908203124, 1.0096250610351563, 1.0100101318359376, 1.0093414306640625, 1.0092892456054687, 1.009311767578125, 1.009322998046875, 1.0097203369140626, 2.081078369140625, 1.009328125, 1.0090977172851563, 1.0092052612304687, 1.0094520263671876, 1.0091028442382812, 1.0091796264648438, 1.0086420288085938, 1.0094274291992187, 1.0089891967773437, 1.0090137329101563, 1.0094161987304688, 1.0096568603515625, 1.0095421142578125, 1.009850341796875, 1.0090895385742187, 1.0094059448242187, 1.009059814453125, 1.0094509887695313, 1.0088161010742187, 1.008974853515625, 1.0091888427734375, 1.0095595703125, 1.009016845703125, 1.0097346801757812, 1.0089810180664063, 1.00931787109375, 1.0090147705078125, 1.0099138793945313, 1.0100213623046874, 1.0104381713867188, 1.010629638671875, 1.010713623046875, 1.0101360473632812, 1.0105651245117186, 1.0090690307617187, 1.0092144775390626, 1.009344482421875, 1.01054052734375, 1.0098954467773438, 1.0101473388671875, 1.010071533203125, 1.0109757690429688, 1.0099885864257812, 1.010386962890625, 1.0099199829101562, 1.0098933715820313, 1.0100418701171876, 1.0103040161132812, 1.0101207275390625, 1.0105702514648438, 1.0102599487304686, 1.00997119140625, 1.0098646850585937, 1.0102732543945312, 1.0096066284179688, 1.00906494140625, 1.0095984497070312, 1.0102210693359375, 1.0098585815429688, 1.0101514282226562, 1.010039794921875, 1.0104360961914063, 2.082943115234375, 
1.009787841796875, 1.0099476318359375, 1.0097244262695313, 1.0104217529296875, 1.0104340209960938, 1.0096947021484375, 1.0098309326171875, 1.009544189453125, 1.0100009155273437, 1.01024560546875, 1.0097223510742188, 1.0100695190429687, 1.0102835083007813, 1.009902587890625, 1.0104699096679688, 1.0107535400390626, 1.0102671508789063, 1.010640869140625, 1.0102630615234376, 1.0101463012695313, 1.0099834594726562, 1.0103336791992188, 1.0099384155273436, 1.0103685302734375, 1.0097254638671875, 1.0096906127929688, 1.0098125, 1.0103756713867187, 1.0102271728515626, 1.010640869140625, 1.0090096435546876, 1.0095994873046874, 1.0097633056640625, 1.0101729125976562, 1.0090198974609375, 1.0095718383789063, 1.0087034912109376, 1.009523681640625, 1.00916943359375, 1.0090352783203125, 1.0087833862304687, 1.0096363525390626, 1.0092359619140625, 1.0092656860351563, 1.0091233520507812, 1.00946533203125, 1.008932861328125, 1.009027099609375, 1.0091468505859376, 1.0092257080078124, 1.0089072875976564, 1.008837646484375, 1.0091827392578125, 1.0094110717773437, 1.0090751953125, 1.0094848022460938, 1.0091868286132812, 1.009480712890625, 1.0097039184570313, 1.0093670654296876, 1.0094059448242187, 1.0094376831054688, 2.081429443359375, 1.009227783203125, 1.0094479370117186, 1.010234375, 1.0106552124023438, 1.010882568359375, 1.0105589599609375, 1.0087034912109376, 1.0110679321289062, 1.0107822265625, 1.0108876953125, 1.0104412231445312, 1.0089584350585938, 1.0093711547851563, 1.0090792846679688, 1.0109276123046875, 1.0108334350585937, 1.0103521118164063, 1.0105712890625, 1.0102968139648438, 1.0089482421875, 1.0090025024414062, 1.0100203247070312, 1.0104258422851562, 1.009818603515625, 1.0096087036132813, 1.0098165893554687, 1.0094642944335936, 1.0101575927734374, 1.0103203735351562, 1.0114457397460936, 1.0106500854492189, 1.010361328125, 1.010103271484375, 1.0099415283203126, 1.0093311767578126, 1.0099465942382813, 1.0090475463867188, 1.0093302001953126, 1.0094981079101562, 1.0090823974609375, 1.0102753295898437, 1.0096322631835937, 1.0092564697265625, 1.0091519775390625, 1.0096640014648437, 1.009281005859375, 1.0090864868164062, 1.0091448364257813, 1.0099537963867187, 1.0093250732421875, 1.0095186157226563, 1.0094940185546875, 1.00933837890625, 1.0094080200195312, 1.0091744995117187, 1.0092451782226564, 1.0095390625, 1.0101217041015624, 1.01006640625, 1.0101780395507813, 1.009744873046875, 1.0093660278320313, 2.08117138671875, 1.0091130981445313, 1.009100830078125, 1.0086492309570312, 1.0088734741210938, 1.0091294555664063, 1.0090188598632812, 1.0089410400390626, 1.009344482421875, 1.0095523681640626, 1.0093916015625, 1.0090680541992187, 1.00940185546875, 1.009154052734375, 1.0091970825195313, 1.0093629150390624, 1.0088673095703125, 1.0088601684570313, 1.009817626953125, 1.009006591796875, 1.0100746459960936, 1.009349609375, 1.0093670654296876, 1.0091519775390625, 1.0093035278320313, 1.0091581420898437, 1.0092021484375, 1.0091058959960937, 1.01058251953125, 1.0097705078125, 1.0106183471679688, 1.0098779907226563, 1.0096998291015624, 1.0104105224609374, 1.0094213256835938, 1.009344482421875, 1.0096097412109375, 1.0097407836914063, 1.0099261474609376, 1.009217529296875, 1.0095718383789063, 1.0091837158203125, 1.0093905639648437, 1.0092267456054687, 1.0091878662109375, 1.0096301879882812, 1.0098125, 1.0094929809570312, 1.009860595703125, 1.0093219604492187, 1.0101544799804687, 1.009301513671875, 1.0094714965820313, 1.009460205078125, 1.0093588256835937, 1.0097899780273438, 1.0095984497070312, 1.0096742553710938, 
1.010693115234375, 1.0096957397460937, 1.010155517578125, 1.010134033203125, 1.00997119140625, 2.082757568359375, 1.0091796264648438, 1.0095452270507812, 1.0092155151367188, 1.0096373901367188, 1.00949609375, 1.0099005737304687, 1.0090813598632813, 1.009080322265625, 1.008932861328125, 1.0091448364257813, 1.008964599609375, 1.00949609375, 1.0093455200195312, 1.0097909545898438, 1.0094417724609375, 1.00999267578125, 1.0092533569335937, 1.009428466796875, 1.0093148193359376, 1.0098770141601563, 1.0099015502929687, 1.0105159912109376, 1.0107003173828124, 1.0108948364257813, 1.009523681640625, 1.010176025390625, 1.009207275390625, 1.0096517333984374, 1.0096107788085937, 1.010208740234375, 1.0101217041015624, 1.0100725708007812, 1.0099415283203126, 1.01014013671875, 1.009311767578125, 1.0097100830078125, 1.0091714477539062, 1.0097858276367186, 1.0092830810546876, 1.0094745483398437, 1.009259521484375, 1.0102118530273438, 1.0100234375, 1.0097469482421875, 1.010239501953125, 1.0100633544921875, 1.0095165405273439, 1.0095984497070312, 1.0094940185546875, 1.0099507446289062, 1.0094642944335936, 1.0101094360351563, 1.0097879028320313, 1.0109890747070311, 1.0111426391601563, 1.0094315795898436, 1.0090772705078126, 1.009227783203125, 1.0092001342773438, 1.009723388671875, 1.00982373046875, 1.0097694702148436, 2.08317333984375, 1.0093322143554688, 1.00935986328125, 1.0087188720703124, 1.0093055419921875, 1.008848876953125, 1.0092216186523437, 1.0090895385742187, 1.0095001831054689, 1.0094622802734374, 1.0091847534179688, 1.0095789794921874, 1.0093875122070313, 1.0095748901367188, 1.0098646850585937, 1.0092850952148438, 1.0101329956054688, 1.0093475952148439, 1.0094888916015625, 1.00940185546875, 1.0091079711914062, 1.0090505981445312, 1.0093823852539063, 1.0089502563476562, 1.009269775390625, 1.0090301513671875, 1.00952783203125, 1.0098544921875, 1.0100684814453125, 1.0095472412109374, 1.0094541015625, 1.0092247314453124, 1.0096537475585938, 1.0091100463867186, 1.0094458618164062, 1.0091038818359375, 1.0096301879882812, 1.0089943237304688, 1.0096271362304687, 1.009623046875, 1.0094694213867188, 1.009554443359375, 1.0093568115234375, 1.0092083129882812, 1.0091427612304686, 1.0093660278320313, 1.010423828125, 1.0095114135742187, 1.0090711059570312, 1.0094817504882811, 1.009312744140625, 1.0092687377929688, 1.009132568359375, 1.0089052124023437, 1.0089922485351563, 1.0089625854492188, 1.0091192016601562, 1.00922265625, 1.0094295043945312, 1.0091376342773437, 1.0093772583007812, 1.0091202392578125, 1.0095810546875, 2.08344580078125, 1.0095042724609375, 1.0109685668945312, 1.0099476928710938, 1.01062548828125, 1.0095011596679688, 1.0104494018554688, 1.008996337890625, 1.0095462646484374, 1.008911376953125, 1.00917041015625, 1.0089707641601562, 1.00910693359375, 1.0093240356445312, 1.0093403930664062, 1.0089093017578126, 1.009713134765625, 1.0089676513671875, 1.0090844116210937, 1.0088786010742188, 1.00897998046875, 1.0090782470703126, 1.0091878662109375, 1.0094356689453126, 1.009923095703125, 1.0092708129882813, 1.0097356567382811, 1.008616455078125, 1.0090946655273438, 1.0091868286132812, 1.0094305419921874, 1.0093619384765624, 1.0100254516601563, 1.0098093872070313, 1.0101801147460938, 1.0097049560546876, 1.010398193359375, 1.0095513305664063, 1.0100848388671875, 1.0106326904296874, 1.011041259765625, 1.010619384765625, 1.0100930786132813, 1.0094878540039063, 1.0097684326171874, 1.0093609008789062, 1.0095360107421876, 1.009154052734375, 1.0090618896484376, 1.009249267578125, 1.009196044921875, 
1.0090895385742187, 1.0093352661132813, 1.0091714477539062, 1.0094796752929687, 1.0091049194335937, 1.0094694213867188, 1.0094202880859375, 1.009227783203125, 1.0093311767578126, 1.0093629150390624, 1.0093004760742188, 1.0094376831054688, 2.083451904296875, 1.0096097412109375, 1.0098380737304689, 1.009165283203125, 1.0100848388671875, 1.0104432373046874, 1.0090741577148437, 1.0095851440429688, 1.0092666625976563, 1.0088263549804688, 1.0094428100585937, 1.0090045166015624, 1.0094745483398437, 1.0092830810546876, 1.0091581420898437, 1.0089246826171876, 1.0092728271484375, 1.009238037109375, 1.0095165405273439, 1.0092728271484375, 1.009455078125, 1.00973876953125, 1.009924072265625, 1.0094458618164062, 1.0094541015625, 1.0086737670898438, 1.009306640625, 1.009659912109375, 1.0100695190429687, 1.0101934204101561, 1.0098739013671876, 1.0097745971679688, 1.0097152099609374, 1.0097776489257813, 1.0097192993164064, 1.0093025512695313, 1.0096865234375, 1.009438720703125, 1.0101616821289063, 1.0098831176757812, 1.0098104248046875, 1.0097673950195312, 1.01049853515625, 1.0099834594726562, 1.0104944458007812, 1.0097633056640625, 1.0092708129882813, 1.009201171875, 1.0093056030273437, 1.0092236938476562, 1.0090127563476563, 1.0091929321289062, 1.00919091796875, 1.0090670166015625, 1.0091888427734375, 1.008996337890625, 1.0091161499023438, 1.0092789916992186, 1.0097572021484376, 1.0099056396484376, 1.0091837158203125, 1.0094663696289063, 1.0093506469726563]",tokens/s,0.9756596705690017,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,8,8,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/8/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return 
_hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3182-5adb6dbe4ebdd35419a86ac0;2ac0361c-718c-4292-bfb6-007b970fedbd) Repository Not Found for url: https://huggingface.co/8/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 8 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-72B,Qwen/Qwen-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,databricks/dbrx-base,databricks/dbrx-base,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cbe41-1a9210d35353b33917501cb7;16e986eb-aa76-4675-8a67-fd5e627e22fb) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like databricks/dbrx-base is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,microsoft/rho-math-1b-v0.1,microsoft/rho-math-1b-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1441.001472,1646.788608,0.0,1000.341504,901.382144,s,10,1.2824376068115233,0.12824376068115234,0.0013381948349874154,0.12786808013916018,0.12959590759277345,0.1305943862915039,0.13139316925048827,"[0.13159286499023437, 0.12686815643310548, 0.1274053421020508, 0.1281833953857422, 0.12710332489013673, 0.12755276489257814, 0.12878483581542968, 0.12818998718261718, 0.1273829116821289, 0.1293740234375]",tokens/s,1996.1984788989712,kWh,1.5084334378001057e-06,8.263029694510034e-07,5.418130213543536e-06,7.752866620794645e-06,tokens/kWh,33020044.394077398,MB,1441.001472,1646.788608,0.0,1000.341504,932.76416,s,10,76.77896875,7.677896875,0.023249628538630225,7.67915625,7.703863232421875,7.706249243164062,7.708158051757812,"[7.64836474609375, 7.62882421875, 7.67977490234375, 7.69081591796875, 7.67853759765625, 7.7033330078125, 7.70863525390625, 7.67114794921875, 7.67421923828125, 7.69531591796875]",tokens/s,8.205371995179345,kWh,9.095814451596237e-05,4.985198124346679e-05,0.0003132890969742542,0.00045409922273368346,tokens/kWh,138736.19871168057,,s,629,77.8087404022218,0.12370228998763382,0.015267564948821125,0.12151500701904297,0.12351795043945313,0.12392367401123047,0.24871362792968751,"[0.12091289520263672, 0.12134502410888671, 0.1254635543823242, 0.12273868560791015, 0.12172492980957031, 0.12116480255126953, 0.12129894256591797, 0.12113715362548828, 0.12124364471435548, 0.12089241790771485, 0.12106547546386719, 0.12196761322021485, 0.12150886535644531, 0.12106854248046875, 0.12139520263671875, 0.12140544128417968, 0.12128768157958984, 0.12113919830322266, 0.12091187286376953, 0.12113407897949219, 0.1221427230834961, 0.12104601287841797, 0.12148633575439453, 0.12152934265136718, 0.1209886703491211, 0.12108493041992187, 0.12132966613769532, 0.1210931167602539, 0.12157440185546875, 0.12151500701904297, 0.12138393402099609, 0.12142899322509766, 0.12107263946533203, 0.12096717071533203, 0.12105113220214844, 0.1210091552734375, 0.12110643005371094, 0.12125798034667969, 0.12106854248046875, 0.12104908752441407, 0.12095283508300782, 0.12088114929199219, 0.12084735870361328, 0.12105830383300781, 0.12114125061035157, 0.12377804565429687, 0.12163993835449219, 0.12108595275878906, 0.12111154937744141, 0.12169010925292968, 0.12106240081787109, 0.12132556915283203, 0.12277247619628906, 0.12324864196777344, 0.12130406188964844, 0.12083404541015624, 0.12118937683105468, 0.12106240081787109, 0.1208463363647461, 0.1210245132446289, 0.12098560333251954, 0.12108799743652343, 0.2477936706542969, 0.12073062133789063, 0.12093030548095703, 0.12108595275878906, 0.12114227294921875, 0.12072959899902344, 0.12111666870117188, 0.12076544189453126, 0.12109209442138671, 0.12101427459716797, 0.12082688140869141, 0.12099072265625, 0.12134092712402343, 0.12110028839111328, 
0.12076236724853516, 0.12246937561035157, 0.12178125, 0.12127129364013672, 0.12102041625976563, 0.12081561279296875, 0.12112179565429687, 0.12079513549804688, 0.12220722961425781, 0.12136243438720704, 0.12094873809814453, 0.12121804809570312, 0.12110131072998047, 0.12102553558349609, 0.12125389099121094, 0.12094258880615234, 0.12103270721435547, 0.12157030487060547, 0.12122316741943359, 0.1208832015991211, 0.12120269012451172, 0.12115455627441406, 0.12125081634521484, 0.12102861022949218, 0.1209169921875, 0.12092518615722657, 0.12124262237548829, 0.1210808334350586, 0.12104806518554688, 0.12156313323974609, 0.12109209442138671, 0.12071321868896484, 0.12101837158203126, 0.12109721374511719, 0.12089241790771485, 0.1209139175415039, 0.1210613784790039, 0.12095283508300782, 0.12115455627441406, 0.12093030548095703, 0.12107366180419922, 0.1208770523071289, 0.12091187286376953, 0.12103679656982422, 0.12085862731933594, 0.12101427459716797, 0.12090982055664062, 0.12072550201416016, 0.12163686370849609, 0.25078988647460937, 0.12208128356933594, 0.12245606231689453, 0.12228813171386718, 0.12169420623779297, 0.12159283447265624, 0.12156620788574218, 0.12150067138671874, 0.12323635101318359, 0.12156723022460937, 0.12253695678710938, 0.12134194946289062, 0.12189183807373047, 0.12104499053955078, 0.12240998077392579, 0.12145868682861329, 0.12170240020751953, 0.12159999847412109, 0.1212907485961914, 0.12119961547851563, 0.12125183868408203, 0.12093644714355468, 0.12158975982666016, 0.1212252197265625, 0.12197376251220703, 0.12266598510742187, 0.12199116516113281, 0.12252272033691407, 0.12169821166992187, 0.12251750183105468, 0.12150272369384765, 0.12135321807861328, 0.12139315032958985, 0.12348518371582032, 0.12360601806640625, 0.12340531158447265, 0.12198707580566406, 0.12240486145019532, 0.12276121520996094, 0.12286975860595703, 0.1258751983642578, 0.12238240051269532, 0.12162143707275391, 0.1215283203125, 0.12173107147216797, 0.12092723083496094, 0.12131942749023437, 0.12141260528564453, 0.12081356811523437, 0.12177101135253907, 0.12151705932617188, 0.12153241729736328, 0.12177305603027344, 0.12143001556396485, 0.1215836181640625, 0.1209692153930664, 0.12140850830078125, 0.12112588500976562, 0.1214576644897461, 0.12091085052490234, 0.12239871978759766, 0.1221212158203125, 0.12117196655273438, 0.25012428283691407, 0.12173414611816406, 0.12145049285888672, 0.12212838745117187, 0.12329779052734376, 0.12234649658203126, 0.12200857543945312, 0.12356403350830078, 0.12401152038574219, 0.12281446075439453, 0.12228300476074219, 0.12225945281982421, 0.12321791839599609, 0.1226219482421875, 0.1216880645751953, 0.12208537292480469, 0.12178636932373046, 0.122355712890625, 0.12368793487548828, 0.12342374420166016, 0.1231800308227539, 0.1235077133178711, 0.1234524154663086, 0.12143923187255859, 0.1212968978881836, 0.12111052703857422, 0.12108185577392579, 0.12117913818359374, 0.12229325103759765, 0.12225638580322265, 0.12117196655273438, 0.12148838043212891, 0.12117196655273438, 0.1209886703491211, 0.12110131072998047, 0.121565185546875, 0.12196659088134766, 0.12180480194091797, 0.1214382095336914, 0.12103884887695313, 0.1212221450805664, 0.12177510070800782, 0.1223034896850586, 0.12251136016845703, 0.12103372955322265, 0.12178125, 0.12100198364257812, 0.12146482849121094, 0.12118016052246093, 0.12082892608642579, 0.121312255859375, 0.1234698257446289, 0.12242534637451172, 0.12148735809326172, 0.12146790313720703, 0.12168396759033204, 0.12169216156005859, 0.12237516784667969, 0.12246527862548828, 
0.12407603454589844, 0.1232701416015625, 0.1230223388671875, 0.1217976303100586, 0.24860671997070313, 0.12128256225585937, 0.121275390625, 0.12294041442871094, 0.12173926544189453, 0.12173004913330078, 0.12162873840332031, 0.12132447814941406, 0.12121600341796875, 0.12143103790283204, 0.12095078277587891, 0.12180377960205079, 0.12157952117919922, 0.12169216156005859, 0.12112179565429687, 0.12152320098876954, 0.12086579132080078, 0.12098252868652344, 0.12098047637939453, 0.12137574768066406, 0.12174646759033203, 0.12155900573730469, 0.12151193237304687, 0.12159999847412109, 0.12110540771484375, 0.121670654296875, 0.12152217864990235, 0.12107469177246094, 0.12170137786865234, 0.12137165069580078, 0.12135935974121094, 0.12147097778320312, 0.12331520080566406, 0.1227540512084961, 0.1215498275756836, 0.12133171081542969, 0.12187238311767579, 0.12289638519287109, 0.12396749114990234, 0.12359884643554687, 0.12377497863769531, 0.1236858901977539, 0.12344831848144532, 0.12326502227783204, 0.12415385437011718, 0.12243865966796875, 0.12206182098388672, 0.12256665802001954, 0.12256153869628907, 0.12152934265136718, 0.12154163360595703, 0.12200857543945312, 0.12149555206298829, 0.12161023712158203, 0.12224614715576172, 0.12173312377929688, 0.1210224609375, 0.12114022064208985, 0.12110438537597656, 0.1216542739868164, 0.12151398468017578, 0.1216204833984375, 0.12237004852294922, 0.24945152282714844, 0.12142694091796875, 0.12127232360839844, 0.12121907043457031, 0.12172697448730468, 0.12396646118164062, 0.1225902099609375, 0.12338483428955078, 0.12398796844482422, 0.12242227172851562, 0.12293631744384766, 0.12390502166748046, 0.12467814636230469, 0.12454605102539062, 0.12382822418212891, 0.12355379486083984, 0.12169728088378906, 0.12186624145507813, 0.12168396759033204, 0.12192768096923828, 0.12244377899169921, 0.1223526382446289, 0.12162764739990234, 0.1217228775024414, 0.12147097778320312, 0.12159078216552735, 0.1216215057373047, 0.12143309020996093, 0.12141465759277344, 0.12173312377929688, 0.12229631805419922, 0.12157337951660156, 0.12169830322265625, 0.12177817535400391, 0.12134502410888671, 0.12178329467773437, 0.1214607391357422, 0.12167884826660157, 0.12179558563232422, 0.12148326110839844, 0.12133785247802735, 0.12141158294677734, 0.12147814178466797, 0.12156723022460937, 0.121670654296875, 0.12136140441894532, 0.12141875457763672, 0.12145868682861329, 0.12202496337890625, 0.12198502349853516, 0.12148429107666016, 0.1211678695678711, 0.12209049224853516, 0.12175667572021484, 0.12315750122070312, 0.1227315216064453, 0.1232210235595703, 0.12416713714599609, 0.12378214263916015, 0.12348108673095703, 0.12337356567382812, 0.12387942504882812, 0.12446208190917969, 0.2528962554931641, 0.12143718719482421, 0.12210176086425781, 0.12319334411621094, 0.12263116455078125, 0.12150784301757812, 0.12382720184326172, 0.12240589141845704, 0.12118323516845703, 0.12321485137939453, 0.1212907485961914, 0.12293119812011719, 0.12185600280761719, 0.12230451202392578, 0.12115968322753906, 0.12163276672363281, 0.12134092712402343, 0.12267520141601562, 0.1209169921875, 0.12166963195800781, 0.1208616943359375, 0.12096511840820312, 0.12128562927246093, 0.12440268707275391, 0.12370124816894532, 0.12368694305419922, 0.12331721496582031, 0.12332236480712891, 0.12322611236572266, 0.12365824127197265, 0.12440985870361328, 0.1228779525756836, 0.121491455078125, 0.12277964782714844, 0.12328857421875, 0.12425523376464843, 0.12316365051269532, 0.12288307189941407, 0.12309913635253907, 0.1242460174560547, 0.1239900131225586, 
0.12403404998779297, 0.12373197174072266, 0.12393062591552734, 0.1235763168334961, 0.12192972564697266, 0.12147814178466797, 0.12157234954833984, 0.12149964904785156, 0.12162457275390624, 0.12195123291015625, 0.12152934265136718, 0.12136447906494141, 0.12115865325927734, 0.1212200927734375, 0.1212938232421875, 0.12141465759277344, 0.12101734161376954, 0.12137983703613281, 0.12099174499511718, 0.12123955535888672, 0.12125389099121094, 0.12133273315429688, 0.24995942687988282, 0.12144742584228516, 0.12333875274658203, 0.12174336242675782, 0.12292915344238281, 0.12289024353027343, 0.12305101013183593, 0.12317286682128906, 0.12351487731933594, 0.12141567993164062, 0.12154163360595703, 0.12127436828613282, 0.12144435119628906, 0.12115968322753906, 0.12119142150878906, 0.121849853515625, 0.12192972564697266, 0.12124877166748046, 0.12119551849365234, 0.12158566284179688, 0.12136243438720704, 0.1219583969116211, 0.12119245147705078, 0.12171263885498047, 0.12150169372558593, 0.12132761383056641, 0.12123442840576172, 0.12193587493896485, 0.12132659149169922, 0.12116377258300781, 0.12158566284179688, 0.12126924896240235, 0.12131635284423828, 0.12112076568603515, 0.12137574768066406, 0.12258201599121094, 0.12156825256347656, 0.12157337951660156, 0.12132966613769532, 0.12138905334472656, 0.12101427459716797, 0.12355891418457031, 0.12150784301757812, 0.12143103790283204, 0.12133683013916016, 0.12091596984863281, 0.12126207733154297, 0.12289024353027343, 0.12268339538574219, 0.12567449951171875, 0.1217976303100586, 0.12148326110839844, 0.12115455627441406, 0.12108697509765624, 0.12136038208007813, 0.12146892547607421, 0.1212200927734375, 0.12286054229736328, 0.12166451263427734, 0.12133785247802735, 0.1213829116821289, 0.12138700866699219, 0.12124569702148437, 0.24875520324707032, 0.12118118286132812, 0.12122726440429688, 0.1210931167602539, 0.12280934143066406, 0.1244927978515625, 0.12353024291992187, 0.12354764556884766, 0.12378112030029297, 0.12381798553466797, 0.12268851470947266, 0.12156416320800781, 0.12156313323974609, 0.12137062072753907, 0.12162252807617188, 0.12152524566650391, 0.12139520263671875, 0.12147917175292969, 0.12140338897705077, 0.12144435119628906, 0.12111154937744141, 0.12134502410888671, 0.12145254516601563, 0.12136140441894532, 0.12134809875488281, 0.12144332885742187, 0.12182937622070313, 0.12134092712402343, 0.12142694091796875, 0.12099378967285156, 0.12107981109619141, 0.12137677001953125, 0.12088524627685547, 0.12261273956298828, 0.12443750762939453, 0.12207820892333984, 0.12129280090332031, 0.12143718719482421, 0.12140646362304687, 0.12147711944580078, 0.12159385681152343, 0.12174336242675782, 0.1218243179321289, 0.12172179412841796, 0.12106547546386719, 0.1214750747680664, 0.12177203369140625, 0.12269875335693359, 0.12165017700195313, 0.12121907043457031, 0.12128665924072266, 0.12140748596191406, 0.12142899322509766, 0.1210439682006836, 0.12137471771240234, 0.12203622436523437, 0.12110848236083985, 0.12105216217041016, 0.12070809936523437, 0.12304691314697265, 0.12252262115478516, 0.12272537231445313, 0.12301721954345703, 0.25320652770996094, 0.12138803100585938, 0.12191948699951172, 0.12489830780029297, 0.12382514953613281, 0.12366745758056641, 0.12364390563964844, 0.12391324615478516, 0.1236274871826172, 0.12293836975097656, 0.12138086700439453, 0.12145868682861329, 0.12145561981201172, 0.12148531341552735, 0.1216358413696289, 0.12158668518066407, 0.12107981109619141, 0.12146482849121094, 0.12212019348144532, 0.12342272186279298, 0.12273356628417968, 0.12340838623046875, 
0.1230417938232422, 0.12280012512207031, 0.121997314453125, 0.1230387191772461, 0.1221560287475586, 0.12209356689453126, 0.12225433349609376, 0.12141567993164062, 0.12371968078613281, 0.12368895721435547, 0.12325888061523438, 0.12346470642089843, 0.1220495376586914, 0.12130815887451171, 0.12168089294433594, 0.12150169372558593, 0.12148121643066406, 0.12139315032958985, 0.12125389099121094, 0.12131123352050781, 0.1211156463623047, 0.12303667449951172, 0.12199116516113281, 0.12166553497314453, 0.1215989761352539, 0.12130406188964844, 0.12145254516601563, 0.12136243438720704, 0.1214331512451172, 0.12362643432617187, 0.12173107147216797, 0.12140748596191406, 0.12129177856445313, 0.12127334594726563, 0.12132249450683594, 0.12149964904785156, 0.12110438537597656, 0.12128460693359375, 0.12160307312011719, 0.12139417266845703, 0.121133056640625]",tokens/s,8.083924720390927,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,openai-community/gpt2,openai-community/gpt2,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1260.384256,1005.060096,0.0,358.612992,318.913024,s,20,0.1778504323959351,0.008892521619796753,0.00029352442906368934,0.008754608154296874,0.00910248613357544,0.009169697380065919,0.009785504264831543,"[0.009939455986022949, 0.008732735633850098, 0.008714752197265625, 0.009129183769226074, 0.008936767578125, 0.00906287956237793, 0.008935744285583497, 0.008770496368408202, 0.008699071884155274, 0.00863708782196045, 0.009064096450805664, 0.009099519729614258, 0.009006879806518554, 0.008716704368591309, 0.008663392066955567, 0.009004608154296875, 0.008722240447998048, 0.008636159896850586, 0.008738719940185546, 0.008639936447143554]",tokens/s,28788.234760102965,kWh,1.0174500456220489e-07,5.575151129166093e-08,2.1683886616880275e-07,3.743353820226686e-07,tokens/kWh,683878714.9019684,MB,1260.384256,1005.060096,0.0,358.612992,328.809472,s,20,10.067610046386722,0.5033805023193361,0.00995087263393504,0.5025519256591797,0.5155266967773438,0.5156778381347656,0.5160263879394532,"[0.499431884765625, 0.5143662109375, 0.516113525390625, 0.5138157348632812, 0.515512451171875, 0.5115390319824219, 0.49984701538085935, 0.4923589782714844, 0.49069613647460936, 0.5090225524902344, 0.5124207153320313, 0.5156549072265625, 0.4935624084472656, 0.49096063232421877, 0.5048639221191407, 0.5133187255859375, 0.49067050170898435, 0.5002399291992188, 0.491751953125, 0.4914628295898438]",tokens/s,125.15383434544286,kWh,5.837131539873614e-06,3.1984816083411113e-06,9.866002533436888e-06,1.8901615681651614e-05,tokens/kWh,3333048.4050184162,,s,1259,10.226770932674393,0.008122931638343454,0.0011308170692653316,0.007988224029541016,0.008203213119506836,0.00825251808166504,0.016815533714294433,"[0.008956928253173829, 0.009018367767333984, 0.008947711944580078, 0.009010175704956054, 0.009335807800292969, 0.008112128257751466, 0.007797760009765625, 0.007737343788146973, 0.007766016006469726, 0.007806975841522217, 0.007930880069732665, 0.007813119888305664, 0.0077608962059021, 0.007756800174713135, 
0.007740416049957275, 0.007768064022064209, 0.00773529577255249, 0.007709760189056397, 0.007721920013427735, 0.007746560096740723, 0.00775270414352417, 0.00799232006072998, 0.00784281587600708, 0.008203264236450195, 0.008260607719421387, 0.008042495727539062, 0.008059904098510743, 0.007980031967163086, 0.007763967990875244, 0.007751679897308349, 0.007723008155822754, 0.007699456214904785, 0.007764992237091065, 0.007706624031066894, 0.007740416049957275, 0.00783462381362915, 0.007773183822631836, 0.007748608112335205, 0.0077199358940124516, 0.007729152202606201, 0.007729152202606201, 0.007724031925201416, 0.007738368034362793, 0.007704576015472412, 0.0077199358940124516, 0.007832575798034667, 0.007963647842407226, 0.007987199783325195, 0.007905280113220215, 0.0077916159629821775, 0.007706624031066894, 0.007709695816040039, 0.007702527999877929, 0.007707647800445557, 0.007707647800445557, 0.007699456214904785, 0.007712768077850342, 0.007755775928497314, 0.007730175971984863, 0.007721983909606934, 0.007711743831634522, 0.007840799808502196, 0.01680175971984863, 0.007746560096740723, 0.0077547521591186525, 0.00769536018371582, 0.00773632001876831, 0.008111104011535645, 0.008331263542175293, 0.008501248359680176, 0.008590335845947266, 0.008962047576904298, 0.008557600021362305, 0.007902175903320312, 0.007857151985168457, 0.008179712295532226, 0.008134655952453614, 0.008121343612670898, 0.008165375709533691, 0.008182784080505372, 0.00821452808380127, 0.00810086441040039, 0.00818073558807373, 0.008142848014831543, 0.008248319625854492, 0.0081725435256958, 0.008225791931152344, 0.008181759834289551, 0.008154111862182617, 0.008155136108398438, 0.008208383560180664, 0.008145952224731445, 0.008171487808227539, 0.008196096420288086, 0.008184831619262695, 0.008133631706237793, 0.008138751983642578, 0.008140800476074218, 0.008171520233154296, 0.008120320320129394, 0.008166399955749512, 0.008168448448181152, 0.008184831619262695, 0.008118271827697754, 0.00819711971282959, 0.008182784080505372, 0.008133631706237793, 0.008169471740722656, 0.008173567771911621, 0.008208383560180664, 0.008178688049316407, 0.008148991584777832, 0.008138751983642578, 0.008224767684936523, 0.00830668830871582, 0.008169471740722656, 0.008211456298828124, 0.008210432052612305, 0.008233983993530274, 0.008142848014831543, 0.008143872261047362, 0.008169471740722656, 0.008142848014831543, 0.008184831619262695, 0.008094719886779785, 0.016881664276123046, 0.007815167903900147, 0.008175616264343261, 0.008177696228027343, 0.0081561279296875, 0.008162303924560547, 0.008211456298828124, 0.008211456298828124, 0.0081725435256958, 0.008175616264343261, 0.008283136367797851, 0.008204287528991699, 0.008184831619262695, 0.008186880111694337, 0.008223744392395019, 0.008196096420288086, 0.008203264236450195, 0.008229887962341309, 0.008166399955749512, 0.008252415657043457, 0.008163328170776368, 0.008248319625854492, 0.008264703750610352, 0.00818892765045166, 0.00819814395904541, 0.008241151809692383, 0.008165439605712891, 0.0082042236328125, 0.008162303924560547, 0.008232959747314453, 0.008151040077209473, 0.008152128219604492, 0.008161215782165528, 0.008182784080505372, 0.008133631706237793, 0.008158207893371582, 0.008237055778503418, 0.008202239990234375, 0.008193023681640625, 0.008161279678344726, 0.008217599868774414, 0.008156160354614257, 0.00819711971282959, 0.008166399955749512, 0.008308735847473145, 0.008174592018127442, 0.00821350383758545, 0.00821555233001709, 0.008237055778503418, 0.008190976142883302, 0.00820633602142334, 
0.008167424201965333, 0.008225791931152344, 0.008162303924560547, 0.008162303924560547, 0.008168448448181152, 0.00820633602142334, 0.00830463981628418, 0.008229887962341309, 0.008415231704711914, 0.0081725435256958, 0.008143872261047362, 0.008152064323425292, 0.01761075210571289, 0.008202239990234375, 0.008141823768615723, 0.008071167945861817, 0.007961599826812745, 0.008018943786621094, 0.008046591758728027, 0.008005632400512695, 0.00840294361114502, 0.008124416351318359, 0.00820531177520752, 0.008201215744018555, 0.00818073558807373, 0.008185855865478516, 0.00819200038909912, 0.008175616264343261, 0.008318976402282715, 0.008154111862182617, 0.00820531177520752, 0.008134655952453614, 0.008143872261047362, 0.008136704444885253, 0.008194047927856446, 0.008137727737426758, 0.008143872261047362, 0.008171520233154296, 0.008257599830627441, 0.008203200340270996, 0.008194047927856446, 0.00821555233001709, 0.008195072174072266, 0.008143872261047362, 0.007998464107513427, 0.008034303665161132, 0.007986176013946533, 0.007982079982757568, 0.008018943786621094, 0.008036352157592774, 0.008148991584777832, 0.008135680198669434, 0.008142848014831543, 0.008236031532287597, 0.008143872261047362, 0.008142848014831543, 0.008132608413696289, 0.00819200038909912, 0.008179712295532226, 0.008140800476074218, 0.008145983695983888, 0.008236991882324218, 0.0081725435256958, 0.008146944046020508, 0.00818892765045166, 0.008175616264343261, 0.008162303924560547, 0.0081725435256958, 0.00821555233001709, 0.008113151550292968, 0.008209407806396484, 0.008227840423583984, 0.008196096420288086, 0.008126463890075684, 0.008157183647155761, 0.017464319229125978, 0.008184831619262695, 0.00818073558807373, 0.008165375709533691, 0.00821555233001709, 0.008167424201965333, 0.008129535675048828, 0.008302592277526855, 0.008182784080505372, 0.00830361557006836, 0.008194047927856446, 0.008250368118286134, 0.008151040077209473, 0.008257535934448243, 0.008231936454772949, 0.008444928169250488, 0.008429568290710449, 0.00829849624633789, 0.008167424201965333, 0.008171520233154296, 0.008143903732299805, 0.008130528450012208, 0.008302592277526855, 0.008113151550292968, 0.008168448448181152, 0.008145919799804687, 0.008143872261047362, 0.008110079765319824, 0.008140800476074218, 0.008116224288940429, 0.008160256385803222, 0.008153087615966797, 0.00813158416748047, 0.008157183647155761, 0.008136704444885253, 0.008124416351318359, 0.008236031532287597, 0.008158207893371582, 0.008129535675048828, 0.008147968292236327, 0.008122367858886719, 0.008253439903259278, 0.008128512382507324, 0.00810905647277832, 0.008117247581481933, 0.008118271827697754, 0.008119296073913575, 0.008129535675048828, 0.008126463890075684, 0.008141823768615723, 0.008165375709533691, 0.00819200038909912, 0.008229887962341309, 0.008130559921264649, 0.008218624114990235, 0.008133631706237793, 0.008155136108398438, 0.00820531177520752, 0.00811520004272461, 0.008147968292236327, 0.008153087615966797, 0.008163328170776368, 0.008157247543334962, 0.01752979278564453, 0.008169471740722656, 0.008134655952453614, 0.008151040077209473, 0.008130559921264649, 0.008232959747314453, 0.00812339210510254, 0.008141823768615723, 0.008133631706237793, 0.008141823768615723, 0.008154175758361816, 0.00810591983795166, 0.008169471740722656, 0.008153087615966797, 0.008141823768615723, 0.008200223922729493, 0.008241120338439942, 0.008136704444885253, 0.008132608413696289, 0.008174592018127442, 0.008144895553588867, 0.008146944046020508, 0.008145919799804687, 0.008146976470947266, 
0.008175583839416503, 0.007980031967163086, 0.007983104228973388, 0.00800051212310791, 0.007997439861297608, 0.00799948787689209, 0.008101887702941894, 0.00801689624786377, 0.008174592018127442, 0.0081397762298584, 0.008117247581481933, 0.008122367858886719, 0.008126463890075684, 0.008122367858886719, 0.008138751983642578, 0.008155136108398438, 0.008119296073913575, 0.008136704444885253, 0.00807423973083496, 0.007927807807922363, 0.007958528041839599, 0.007939072132110595, 0.008014847755432129, 0.007986176013946533, 0.007945216178894043, 0.007932928085327149, 0.007987199783325195, 0.008179712295532226, 0.008140800476074218, 0.008185855865478516, 0.008138751983642578, 0.008184831619262695, 0.008138751983642578, 0.008258560180664062, 0.008273920059204102, 0.008167424201965333, 0.008194047927856446, 0.008145919799804687, 0.008177663803100586, 0.01745510482788086, 0.008202239990234375, 0.008157183647155761, 0.00818380832672119, 0.00870195198059082, 0.008116224288940429, 0.00800153636932373, 0.008036352157592774, 0.007977983951568603, 0.00793497610092163, 0.007962624073028564, 0.007925759792327881, 0.00794316816329956, 0.007910399913787843, 0.007746560096740723, 0.007692287921905518, 0.007689216136932373, 0.0076871681213378906, 0.007707647800445557, 0.007682047843933106, 0.0076912641525268555, 0.007675903797149658, 0.0076984319686889645, 0.007768064022064209, 0.007718912124633789, 0.007701504230499268, 0.007781375885009765, 0.007845888137817383, 0.007806975841522217, 0.007817215919494629, 0.007786496162414551, 0.00787660789489746, 0.007799808025360107, 0.0077814397811889644, 0.007800767898559571, 0.007912447929382324, 0.007824384212493896, 0.007813119888305664, 0.007792640209197998, 0.007946239948272706, 0.007815167903900147, 0.007805952072143555, 0.00778547191619873, 0.007804927825927735, 0.007789567947387695, 0.007865344047546387, 0.007772160053253174, 0.007777279853820801, 0.007775231838226319, 0.007806975841522217, 0.007789567947387695, 0.007932928085327149, 0.007971839904785156, 0.009268223762512207, 0.00830463981628418, 0.008198207855224609, 0.008211392402648926, 0.008195072174072266, 0.008028160095214844, 0.008003583908081055, 0.008034303665161132, 0.008128512382507324, 0.008179712295532226, 0.016935935974121095, 0.00780185604095459, 0.007796735763549805, 0.007805952072143555, 0.007813119888305664, 0.007988224029541016, 0.008045568466186523, 0.007916543960571289, 0.007796735763549805, 0.007798783779144287, 0.007809023857116699, 0.007804927825927735, 0.007792640209197998, 0.007812096118927002, 0.00781824016571045, 0.00780083179473877, 0.007870463848114014, 0.007792640209197998, 0.007804927825927735, 0.007790656089782715, 0.007819200038909912, 0.007790592193603516, 0.007794688224792481, 0.007809023857116699, 0.007795711994171142, 0.007768064022064209, 0.007797760009765625, 0.007795711994171142, 0.007795711994171142, 0.007813119888305664, 0.007798783779144287, 0.007802879810333252, 0.007819263935089112, 0.007811071872711181, 0.007840767860412597, 0.007812096118927002, 0.007788544178009033, 0.007775231838226319, 0.0077916159629821775, 0.007782400131225586, 0.00781004810333252, 0.00780185604095459, 0.007778304100036621, 0.007794688224792481, 0.007767039775848389, 0.007796735763549805, 0.0077916159629821775, 0.007797760009765625, 0.007821311950683594, 0.0077844481468200685, 0.007780352115631104, 0.007840767860412597, 0.0077844481468200685, 0.007767136096954346, 0.0077935681343078616, 0.007836671829223632, 0.007762944221496582, 0.007809023857116699, 0.007853055953979492, 0.007787519931793213, 
0.007774208068847656, 0.007829504013061523, 0.00781824016571045, 0.01681612777709961, 0.007815199851989746, 0.007830495834350587, 0.007776256084442139, 0.007788544178009033, 0.007783423900604248, 0.007771135807037354, 0.007827455997467042, 0.007795711994171142, 0.007773183822631836, 0.007782400131225586, 0.007808000087738037, 0.00780083179473877, 0.007798783779144287, 0.007873536109924317, 0.007805952072143555, 0.00780083179473877, 0.007847936153411865, 0.0077844481468200685, 0.007780352115631104, 0.0077916159629821775, 0.007790592193603516, 0.007822336196899414, 0.007794688224792481, 0.007888895988464355, 0.007833600044250488, 0.007797760009765625, 0.007749631881713868, 0.007869440078735352, 0.007769087791442871, 0.007812096118927002, 0.00780083179473877, 0.007803904056549072, 0.007812096118927002, 0.007794688224792481, 0.007811071872711181, 0.007814144134521485, 0.007817215919494629, 0.007795711994171142, 0.007819263935089112, 0.007755775928497314, 0.007792640209197998, 0.007717887878417969, 0.007688223838806152, 0.007795680046081543, 0.007675903797149658, 0.007663616180419922, 0.007669760227203369, 0.007685120105743408, 0.007670783996582031, 0.007699456214904785, 0.007779327869415284, 0.007753791809082031, 0.00776800012588501, 0.007794688224792481, 0.007774208068847656, 0.0077916479110717776, 0.0077833919525146485, 0.007789567947387695, 0.0077506561279296875, 0.007809023857116699, 0.007776256084442139, 0.007787519931793213, 0.016747520446777343, 0.007830527782440186, 0.00781004810333252, 0.007762944221496582, 0.007786496162414551, 0.00778547191619873, 0.007792640209197998, 0.007819263935089112, 0.007789567947387695, 0.007815167903900147, 0.0077844481468200685, 0.007814144134521485, 0.007829504013061523, 0.007815167903900147, 0.008204287528991699, 0.008175616264343261, 0.008138751983642578, 0.008138751983642578, 0.008143872261047362, 0.008132608413696289, 0.008127488136291505, 0.008151040077209473, 0.008156160354614257, 0.008147968292236327, 0.00820736026763916, 0.008129535675048828, 0.008166399955749512, 0.008173600196838379, 0.00812335968017578, 0.008141823768615723, 0.008167424201965333, 0.008120320320129394, 0.008144895553588867, 0.008153087615966797, 0.008212479591369629, 0.008155136108398438, 0.008140800476074218, 0.008129535675048828, 0.008145919799804687, 0.00811520004272461, 0.008171520233154296, 0.008128512382507324, 0.008159232139587403, 0.008104960441589355, 0.008134655952453614, 0.008156160354614257, 0.008217599868774414, 0.008228863716125488, 0.008134655952453614, 0.008118271827697754, 0.008137727737426758, 0.008124447822570801, 0.008145888328552247, 0.00819711971282959, 0.008179743766784668, 0.008160223960876465, 0.008161279678344726, 0.008132608413696289, 0.00820531177520752, 0.008125439643859863, 0.008132608413696289, 0.008161312103271484, 0.008129504203796387, 0.017510400772094727, 0.00813161563873291, 0.008154080390930176, 0.008121343612670898, 0.008201215744018555, 0.008152095794677735, 0.008171487808227539, 0.008169471740722656, 0.008168448448181152, 0.008137727737426758, 0.008147968292236327, 0.008148991584777832, 0.008154111862182617, 0.008139840126037598, 0.008128447532653809, 0.00813158416748047, 0.008137727737426758, 0.008121343612670898, 0.008233983993530274, 0.008128512382507324, 0.008145919799804687, 0.008157183647155761, 0.00813158416748047, 0.008204287528991699, 0.008161279678344726, 0.007993343830108643, 0.00791756820678711, 0.007980031967163086, 0.007956480026245117, 0.007964672088623047, 0.007927807807922363, 0.008011775970458984, 0.008147968292236327, 
0.008133631706237793, 0.00818380832672119, 0.008170495986938477, 0.008175616264343261, 0.008127488136291505, 0.008137727737426758, 0.008146944046020508, 0.008140800476074218, 0.008151040077209473, 0.008162303924560547, 0.008179712295532226, 0.008148991584777832, 0.008124416351318359, 0.008137727737426758, 0.008125439643859863, 0.008153087615966797, 0.008114175796508789, 0.008143872261047362, 0.008122367858886719, 0.008156160354614257, 0.008160256385803222, 0.008135680198669434, 0.008086527824401855, 0.008153087615966797, 0.008133631706237793, 0.0081725435256958, 0.0081725435256958, 0.00819200038909912, 0.008133631706237793, 0.008163328170776368, 0.017588224411010742, 0.008179712295532226, 0.008193023681640625, 0.00818380832672119, 0.008154111862182617, 0.008151040077209473, 0.008155136108398438, 0.008173567771911621, 0.008159232139587403, 0.008130559921264649, 0.008146944046020508, 0.008140800476074218, 0.008162336349487305, 0.008166367530822755, 0.008170495986938477, 0.008184831619262695, 0.008157183647155761, 0.008153087615966797, 0.00813158416748047, 0.008167424201965333, 0.008148991584777832, 0.008136704444885253, 0.008152064323425292, 0.008177663803100586, 0.008147968292236327, 0.008144895553588867, 0.008175616264343261, 0.008174592018127442, 0.008225791931152344, 0.008175616264343261, 0.008150015830993652, 0.008158207893371582, 0.008162303924560547, 0.008164352416992187, 0.008138751983642578, 0.008152064323425292, 0.008166399955749512, 0.008134655952453614, 0.008158207893371582, 0.008156160354614257, 0.008137727737426758, 0.00818073558807373, 0.008144895553588867, 0.008691712379455567, 0.008336383819580078, 0.008157183647155761, 0.008369152069091796, 0.00819814395904541, 0.008178688049316407, 0.008175616264343261, 0.008190976142883302, 0.008163328170776368, 0.008146944046020508, 0.00818182373046875, 0.008277952194213867, 0.008161279678344726, 0.00819814395904541, 0.008154111862182617, 0.008211456298828124, 0.008153087615966797, 0.008143872261047362, 0.00818380832672119, 0.008178688049316407, 0.017306623458862306, 0.007987199783325195, 0.00799948787689209, 0.007954432010650634, 0.007939072132110595, 0.008583168029785156, 0.008274944305419921, 0.008042495727539062, 0.008291328430175781, 0.008148991584777832, 0.007989247798919678, 0.007942143917083741, 0.007837696075439453, 0.007702527999877929, 0.007700479984283447, 0.007751679897308349, 0.007745535850524903, 0.007712768077850342, 0.007682047843933106, 0.007650303840637207, 0.007663616180419922, 0.007663616180419922, 0.007679999828338623, 0.00780083179473877, 0.007711743831634522, 0.007683072090148926, 0.007688191890716553, 0.007677951812744141, 0.007696415901184082, 0.007775199890136719, 0.007783423900604248, 0.007798783779144287, 0.007811071872711181, 0.007773183822631836, 0.007781375885009765, 0.0077844481468200685, 0.007805952072143555, 0.007782400131225586, 0.007778304100036621, 0.007778304100036621, 0.0078438401222229, 0.007774208068847656, 0.007861248016357422, 0.007822336196899414, 0.00785203218460083, 0.007787519931793213, 0.00779366397857666, 0.007774208068847656, 0.007798783779144287, 0.0077608962059021, 0.00780185604095459, 0.0077916159629821775, 0.00780083179473877, 0.007935999870300293, 0.007792640209197998, 0.007762944221496582, 0.007783423900604248, 0.007781375885009765, 0.007789567947387695, 0.007780352115631104, 0.00781004810333252, 0.0077844481468200685, 0.007790592193603516, 0.01681510353088379, 0.00780185604095459, 0.0077916159629821775, 0.007786496162414551, 0.00780185604095459, 0.007774208068847656, 
0.007781375885009765, 0.007767039775848389, 0.007797760009765625, 0.008054783821105957, 0.007769120216369629, 0.007801824092864991, 0.007887872219085693, 0.007806975841522217, 0.00780185604095459, 0.007796735763549805, 0.007758848190307617, 0.007686143875122071, 0.007758848190307617, 0.007764992237091065, 0.007665664196014404, 0.007762944221496582, 0.007830527782440186, 0.0076984319686889645, 0.007705599784851074, 0.007775231838226319, 0.007783423900604248, 0.0077506561279296875, 0.007763967990875244, 0.007799808025360107, 0.007774208068847656, 0.0077844481468200685, 0.007889920234680176, 0.007813119888305664, 0.00785203218460083, 0.007798816204071045, 0.007802847862243652, 0.0077619199752807615, 0.0077608962059021, 0.007812096118927002, 0.007770112037658691, 0.007820288181304931, 0.007809023857116699, 0.007751679897308349, 0.007801919937133789, 0.007790527820587158, 0.00778547191619873, 0.007769087791442871, 0.007772160053253174, 0.007780352115631104, 0.007766016006469726, 0.007772160053253174, 0.007797760009765625, 0.007746560096740723, 0.007767039775848389, 0.007792640209197998, 0.007804927825927735, 0.00780188798904419, 0.007841760158538819, 0.007790592193603516, 0.007799808025360107, 0.00780185604095459, 0.007794688224792481, 0.016740352630615234, 0.007783423900604248, 0.00780185604095459, 0.007804927825927735, 0.007759871959686279, 0.007790592193603516, 0.007788544178009033, 0.007780352115631104, 0.007831615924835205, 0.007797696113586426, 0.007781407833099365, 0.007782368183135986, 0.007770112037658691, 0.007808000087738037, 0.007779327869415284, 0.007780352115631104, 0.007817279815673829, 0.007812032222747803, 0.008190976142883302, 0.008184831619262695, 0.008161279678344726, 0.008177727699279784, 0.008145855903625489, 0.0081397762298584, 0.008135680198669434, 0.008153087615966797, 0.008148991584777832, 0.008147968292236327, 0.0081397762298584, 0.008167424201965333, 0.008143903732299805, 0.008128479957580566, 0.008156160354614257, 0.008138751983642578, 0.008133631706237793, 0.008155136108398438, 0.008153087615966797, 0.008138751983642578, 0.008173567771911621, 0.008208383560180664, 0.008002559661865234, 0.00800972843170166, 0.007980031967163086, 0.008076288223266602, 0.00808448028564453, 0.007964672088623047, 0.008034303665161132, 0.007988224029541016, 0.007964672088623047, 0.007987199783325195, 0.00799948787689209, 0.007969791889190675, 0.007956480026245117, 0.007990272045135497, 0.00797388792037964, 0.00794316816329956, 0.008023039817810058, 0.008061951637268066, 0.008179743766784668, 0.008191967964172363, 0.0081725435256958, 0.008150015830993652, 0.008185855865478516, 0.017484800338745117, 0.008162303924560547, 0.008151040077209473, 0.008169471740722656, 0.008161279678344726, 0.008167424201965333, 0.008156160354614257, 0.008194047927856446, 0.008224767684936523, 0.008291328430175781, 0.008148991584777832, 0.008154111862182617, 0.008160256385803222, 0.00816438388824463, 0.008163295745849609, 0.008171520233154296, 0.008177663803100586, 0.0081725435256958, 0.008231936454772949, 0.008153087615966797, 0.008132608413696289, 0.008309760093688966, 0.008117247581481933, 0.007990272045135497, 0.007979008197784423, 0.007970816135406494, 0.007997439861297608, 0.00799232006072998, 0.00801587200164795, 0.007982079982757568, 0.008145983695983888, 0.008135616302490234, 0.008164352416992187, 0.008163328170776368, 0.008165375709533691, 0.008184831619262695, 0.008158207893371582, 0.008155136108398438, 0.008176639556884765, 0.008182784080505372, 0.008145919799804687, 0.008140800476074218, 
0.008156160354614257, 0.008150015830993652, 0.008135680198669434, 0.0081397762298584, 0.008142848014831543, 0.008136704444885253, 0.0081080322265625, 0.008219648361206054, 0.008170495986938477, 0.008157183647155761, 0.008315903663635254, 0.00819711971282959, 0.008167424201965333, 0.008058879852294922, 0.007974912166595459, 0.008057855606079101, 0.00821350383758545, 0.008169471740722656, 0.008144895553588867, 0.008160256385803222, 0.008122367858886719, 0.01683046340942383, 0.00781004810333252, 0.007808000087738037, 0.00778547191619873, 0.007855103969573975, 0.007806975841522217, 0.007795711994171142, 0.007780352115631104, 0.007798783779144287, 0.007795711994171142, 0.0077916159629821775, 0.007814144134521485, 0.007811071872711181, 0.007796735763549805, 0.007827455997467042, 0.007817215919494629, 0.007875584125518798, 0.007839744091033935, 0.007795711994171142, 0.007805952072143555, 0.007778304100036621, 0.0077916159629821775, 0.007786496162414551, 0.007789567947387695, 0.007786496162414551, 0.0077916159629821775, 0.007804927825927735, 0.007798783779144287, 0.007790592193603516, 0.007811071872711181, 0.007797760009765625, 0.007786496162414551, 0.007812096118927002, 0.007651328086853027, 0.007665664196014404, 0.007666687965393066, 0.007665664196014404, 0.007659520149230957, 0.007679999828338623, 0.007675903797149658, 0.007781375885009765, 0.007795711994171142, 0.007799808025360107, 0.007756800174713135, 0.007774208068847656, 0.0077578239440917966, 0.007971839904785156, 0.007782400131225586, 0.007790592193603516, 0.007787519931793213, 0.007797760009765625, 0.007782400131225586, 0.00779366397857666, 0.007764992237091065, 0.007797760009765625, 0.007774208068847656, 0.007792640209197998, 0.007769087791442871, 0.007784512042999268, 0.007765952110290527, 0.007788544178009033, 0.007747583866119385, 0.007840767860412597, 0.016747520446777343, 0.007811071872711181, 0.007816192150115966, 0.007752768039703369, 0.007791552066802978, 0.007802879810333252, 0.007780352115631104, 0.0077916159629821775, 0.007798783779144287, 0.00780185604095459, 0.007831552028656007, 0.007783423900604248, 0.007788544178009033, 0.007796735763549805, 0.007817215919494629, 0.007794688224792481, 0.007841792106628418, 0.007812096118927002, 0.007792640209197998, 0.007789567947387695, 0.007776256084442139, 0.00780185604095459, 0.007771135807037354, 0.007783423900604248, 0.007767039775848389, 0.007773183822631836, 0.007803904056549072, 0.007847936153411865, 0.007794688224792481, 0.007759903907775879, 0.007892960071563721, 0.007931903839111328, 0.007790592193603516, 0.00786636781692505, 0.007941120147705078, 0.008167424201965333, 0.007970816135406494, 0.007970816135406494, 0.007945216178894043, 0.007967743873596191, 0.007945216178894043, 0.007947264194488525, 0.007963647842407226, 0.007942143917083741, 0.008156160354614257, 0.008130559921264649, 0.008107071876525878, 0.00812947177886963, 0.008143872261047362, 0.008111104011535645, 0.008150015830993652, 0.008143872261047362, 0.008136704444885253, 0.008132608413696289, 0.008125439643859863, 0.008134655952453614, 0.008170495986938477, 0.00810905647277832, 0.008399871826171875, 0.008129535675048828, 0.008162303924560547, 0.008132608413696289, 0.008134655952453614, 0.01683251190185547, 0.007788544178009033, 0.007817215919494629, 0.007799808025360107, 0.007805952072143555, 0.007803904056549072, 0.007779327869415284, 0.0077916159629821775, 0.007804927825927735, 0.007817215919494629, 0.007822336196899414, 0.007809023857116699, 0.007855103969573975, 0.007806975841522217, 0.007815167903900147, 
0.00784281587600708, 0.007825407981872558, 0.007775231838226319, 0.007829504013061523, 0.007797760009765625, 0.007817215919494629, 0.0077916159629821775, 0.00780185604095459, 0.007789567947387695, 0.0077844481468200685, 0.007764992237091065, 0.007797760009765625, 0.007780352115631104, 0.007777279853820801, 0.007769087791442871, 0.007795711994171142, 0.007806975841522217, 0.007773183822631836, 0.007780416011810303, 0.007806911945343018, 0.007809023857116699, 0.007812096118927002, 0.007888895988464355, 0.007832575798034667, 0.007770112037658691, 0.00778547191619873, 0.007788544178009033, 0.007790592193603516, 0.007799808025360107, 0.007798783779144287, 0.007837696075439453, 0.007821311950683594, 0.00779366397857666, 0.00778547191619873, 0.0077844481468200685, 0.007777279853820801, 0.007804992198944091, 0.007788479804992676, 0.007777279853820801, 0.00781004810333252, 0.007784480094909668, 0.007799776077270508, 0.007783423900604248, 0.007840767860412597, 0.007831552028656007, 0.007805952072143555, 0.007767039775848389, 0.007812096118927002, 0.016696319580078126, 0.007795711994171142, 0.007797760009765625, 0.007806975841522217, 0.007748640060424804, 0.0077833919525146485, 0.007792640209197998, 0.007804927825927735, 0.007803904056549072, 0.007797760009765625, 0.0077742719650268554, 0.007868351936340333, 0.007813119888305664, 0.007794688224792481, 0.007806975841522217, 0.00780083179473877, 0.007832575798034667, 0.007783423900604248, 0.007816192150115966, 0.007777279853820801, 0.007782400131225586, 0.007778304100036621, 0.007790592193603516, 0.007867392063140868, 0.0077916159629821775, 0.007799808025360107, 0.007783423900604248, 0.007789567947387695, 0.007783423900604248, 0.007789567947387695, 0.00778547191619873, 0.007780352115631104, 0.007875584125518798, 0.007797760009765625, 0.00778547191619873, 0.007756800174713135, 0.007773183822631836, 0.007787519931793213, 0.007767039775848389, 0.007811071872711181, 0.00779366397857666, 0.007770112037658691, 0.007783423900604248, 0.00781004810333252, 0.007797760009765625, 0.007773183822631836, 0.007773183822631836, 0.007782400131225586, 0.007844863891601562, 0.007785535812377929, 0.007792575836181641, 0.007806975841522217, 0.00781824016571045, 0.007796735763549805, 0.00778547191619873, 0.007778304100036621, 0.007828479766845703, 0.007777279853820801, 0.007858176231384278, 0.007815167903900147, 0.0077608962059021, 0.00781004810333252, 0.007812096118927002]",tokens/s,123.10826245041925,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,togethercomputer/RedPajama-INCITE-Base-7B-v0.1,togethercomputer/RedPajama-INCITE-Base-7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.1,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1392.545792,6237.454336,0.0,5591.007232,5346.100736,s,10,5.702087280273438,0.5702087280273438,0.0032758876463017,0.5691306762695312,0.5706701721191406,0.5753404876708984,0.5790767401123047,"[0.5800108032226563, 0.5690457763671875, 0.56963232421875, 0.5686717529296875, 0.5691049194335938, 0.5689085083007812, 0.5691564331054687, 0.5690445556640625, 
0.5692649536132812, 0.5692472534179688]",tokens/s,448.9584031546493,kWh,6.730608862859232e-06,3.6874586645353704e-06,3.2089053449001976e-05,4.2507120976396576e-05,tokens/kWh,6022520.32411586,MB,1392.545792,6237.454336,0.0,5591.007232,5555.34336,s,10,329.95942187500003,32.995942187500006,0.00677698481578348,32.996611328125,33.003900390625,33.0066337890625,33.008820507812494,"[32.98758984375, 32.99876953125, 32.99800390625, 33.00329296875, 32.98576171875, 32.99270703125, 32.99606640625, 32.99070703125, 32.99715625, 33.0093671875]",tokens/s,1.9093256874436688,kWh,0.0003894760356291577,0.00021346548665747832,0.001836931469543996,0.002439872991830632,tokens/kWh,25821.016180326347,,s,629,334.5575883178707,0.5318880577390637,0.0676032283856282,0.5236981811523438,0.5243047729492187,0.52448828125,1.092699482421875,"[0.5232855224609375, 0.5235804443359375, 0.5231001586914062, 0.5230325927734375, 0.52342578125, 0.5232117919921875, 0.5233387451171875, 0.52324658203125, 0.523146240234375, 0.5234544677734375, 0.5234892578125, 0.523303955078125, 0.5232906494140624, 0.52303564453125, 0.5235220336914063, 0.5233828125, 0.5234381103515625, 0.5232639770507812, 0.523114501953125, 0.5234656982421875, 0.5235753173828125, 0.5235343627929687, 0.5234656982421875, 0.52356298828125, 0.5236715698242187, 0.523630615234375, 0.523747314453125, 0.5236019287109375, 0.5239511108398438, 0.5234032592773438, 0.5237637329101562, 0.5235281982421875, 0.5234298706054688, 0.5238978271484375, 0.523704345703125, 0.5234800415039063, 0.523568115234375, 0.5234032592773438, 0.5236162719726563, 0.523826171875, 0.5237114868164062, 0.5236602783203125, 0.5236776733398437, 0.5234022827148438, 0.5235650024414062, 0.5239142456054687, 0.5236613159179687, 0.5238640747070312, 0.5236664428710938, 0.5236551513671875, 0.5237268676757812, 0.523779052734375, 0.5245716552734375, 0.5238753051757813, 0.5239859008789063, 0.5239797973632813, 0.52379443359375, 0.5241487426757813, 0.5243228149414062, 0.5239275512695313, 0.5239859008789063, 0.5239849243164062, 1.092547607421875, 0.5234862060546875, 0.5234585571289062, 0.523430908203125, 0.523419677734375, 0.5231114501953125, 0.52326806640625, 0.5233162231445313, 0.5237933959960938, 0.5237329711914063, 0.5236541137695313, 0.5234268188476563, 0.5234974975585938, 0.5239500732421875, 0.5236981811523438, 0.5235404663085937, 0.5233397827148437, 0.5233663940429687, 0.5233930053710938, 0.523251708984375, 0.5238825073242187, 0.5233387451171875, 0.52321484375, 0.5236510620117187, 0.5232701416015625, 0.523261962890625, 0.5234851684570313, 0.52385791015625, 0.5233141479492187, 0.5233141479492187, 0.5237933959960938, 0.5236162719726563, 0.5235394287109375, 0.5236039428710938, 0.5236265258789062, 0.523404296875, 0.524432373046875, 0.5245040893554688, 0.5246044311523438, 0.52440576171875, 0.5234370727539063, 0.523509765625, 0.5242644653320313, 0.5239736328125, 0.5239613647460938, 0.5238251342773438, 0.5239859008789063, 0.5239480590820312, 0.5240678100585937, 0.5244600219726563, 0.5235599365234375, 0.5235322875976562, 0.5239992065429687, 0.5245665283203125, 0.5246177368164062, 0.5242409057617188, 0.5239818115234375, 0.5243668212890625, 0.5246289672851563, 0.5246351318359375, 0.524642333984375, 0.5243975830078125, 0.5244672241210937, 1.093401611328125, 0.5240750122070312, 0.5239552001953125, 0.52360498046875, 0.523466796875, 0.5243002319335938, 0.5241241455078125, 0.5247160034179688, 0.5239346923828125, 0.5236541137695313, 0.5237217407226562, 0.5237616577148437, 0.5244815063476562, 0.524211181640625, 0.5238456420898437, 
0.5239695434570313, 0.5239244995117187, 0.5236213989257813, 0.5235159301757812, 0.5238743286132812, 0.5235824584960938, 0.5235179443359375, 0.52333056640625, 0.5233848266601563, 0.5233059692382812, 0.5234913330078125, 0.5241917724609375, 0.5241712646484376, 0.523509765625, 0.523978759765625, 0.524326904296875, 0.5240657958984375, 0.5243740234375, 0.5234913330078125, 0.523325439453125, 0.5236459350585938, 0.5237012329101562, 0.5237268676757812, 0.5233213500976562, 0.5239797973632813, 0.52371044921875, 0.524242919921875, 0.523598876953125, 0.5236613159179687, 0.5237073974609375, 0.523826171875, 0.5238538208007812, 0.5235712280273438, 0.523441162109375, 0.5235732421875, 0.523472900390625, 0.5238896484375, 0.5235292358398438, 0.5239623413085938, 0.5240350952148437, 0.5237391357421874, 0.5234851684570313, 0.5235486450195312, 0.5237903442382813, 0.5235435791015625, 0.5234749145507812, 0.5236244506835938, 0.5238026123046875, 1.0924083251953125, 0.5236694946289062, 0.52347802734375, 0.5234851684570313, 0.5235445556640625, 0.5234339599609374, 0.5239521484375, 0.52405859375, 0.5237380981445312, 0.5242726440429688, 0.5238128662109375, 0.5241282348632812, 0.5237278442382812, 0.524042236328125, 0.523821044921875, 0.523958251953125, 0.5236961059570312, 0.5237063598632813, 0.5236746215820313, 0.5236930541992187, 0.5239552001953125, 0.5237145385742188, 0.5233878784179687, 0.5233551635742187, 0.5233059692382812, 0.5233325805664063, 0.523447265625, 0.5242828979492188, 0.523799560546875, 0.5240350952148437, 0.5233172607421875, 0.523683837890625, 0.5236930541992187, 0.52356298828125, 0.5234247436523437, 0.5236224365234375, 0.5233048706054687, 0.5234401245117187, 0.5237933959960938, 0.5242378540039062, 0.5238743286132812, 0.5239449462890625, 0.5243023071289062, 0.5239142456054687, 0.5239797973632813, 0.5242982177734375, 0.5239521484375, 0.5240166625976562, 0.5240678100585937, 0.5244170532226563, 0.5238589477539063, 0.5242276000976562, 0.5240964965820313, 0.523994140625, 0.5239715576171875, 0.5243576049804688, 0.5245450439453125, 0.5247354736328125, 0.5245726928710938, 0.5245236206054688, 0.5238475952148437, 0.523821044921875, 0.5236725463867188, 1.0934261474609375, 0.52333056640625, 0.5229783325195313, 0.5233264770507813, 0.523093994140625, 0.5231533813476562, 0.5235353393554687, 0.5235691528320312, 0.5232783203125, 0.5233970947265625, 0.52322509765625, 0.5232906494140624, 0.5234452514648438, 0.5239552001953125, 0.5232609252929687, 0.5235496826171875, 0.5232435302734375, 0.5234022216796875, 0.5231646728515625, 0.52375244140625, 0.5232578735351563, 0.5234319458007812, 0.52356298828125, 0.5234237670898437, 0.523451416015625, 0.5234012451171876, 0.5241415405273437, 0.5235762939453125, 0.523673583984375, 0.5237974853515625, 0.52398388671875, 0.52423681640625, 0.5239766845703125, 0.5234073486328125, 0.5233756103515625, 0.5234616088867188, 0.5233121337890625, 0.5233397827148437, 0.5239388427734375, 0.5235742797851562, 0.5236398315429688, 0.52368896484375, 0.5234555053710938, 0.5237698364257812, 0.5233592529296875, 0.523894775390625, 0.5234401245117187, 0.5235117797851563, 0.523404296875, 0.5233878784179687, 0.5234124755859375, 0.5239408569335937, 0.52353125, 0.5234237670898437, 0.5235128784179688, 0.523874267578125, 0.5239234619140625, 0.5243146362304687, 0.5239275512695313, 0.5238773803710938, 0.5242818603515625, 0.523810791015625, 0.523821044921875, 1.0930257568359374, 0.5232107543945312, 0.52316162109375, 0.5232630004882812, 0.5232639770507812, 0.5235537719726563, 0.5244201049804688, 0.5233796997070312, 
0.5232916259765625, 0.523272216796875, 0.52347802734375, 0.5233551635742187, 0.5237022705078125, 0.523598876953125, 0.5238466796875, 0.524136474609375, 0.5237606201171875, 0.5238057250976562, 0.523778076171875, 0.5240791015625, 0.5236336669921875, 0.5233408203125, 0.5235425415039062, 0.5236541137695313, 0.523335693359375, 0.5239746704101562, 0.5234063110351562, 0.5233428344726563, 0.523420654296875, 0.5235691528320312, 0.523378662109375, 0.5238292236328125, 0.523472900390625, 0.5234135131835937, 0.5238937377929688, 0.52394189453125, 0.523778076171875, 0.52375244140625, 0.5239193725585938, 0.5237340087890625, 0.5233941040039063, 0.5235650024414062, 0.5234503784179687, 0.5236265258789062, 0.5240217895507813, 0.52352001953125, 0.5239121704101563, 0.5239193725585938, 0.5238906860351562, 0.5238906860351562, 0.524147705078125, 0.5241671752929687, 0.5243658447265624, 0.5237421875, 0.5238804321289062, 0.5237001953125, 0.523598876953125, 0.5240176391601562, 0.5237852172851563, 0.52413232421875, 0.5235875854492188, 0.52364697265625, 0.52398388671875, 1.0928189697265625, 0.5236561889648438, 0.5236930541992187, 0.5234370727539063, 0.5235292358398438, 0.523916259765625, 0.5238763427734375, 0.5241989135742188, 0.52381591796875, 0.5243770751953125, 0.5241866455078125, 0.5241456909179687, 0.5243525390625, 0.5239951171875, 0.5239971923828125, 0.524368896484375, 0.5239060668945312, 0.524020751953125, 0.5241405639648438, 0.5238804321289062, 0.52356201171875, 0.5234821166992187, 0.5237688598632813, 0.5234370727539063, 0.5234564819335937, 0.523536376953125, 0.5236070556640625, 0.5234503784179687, 0.5238599853515625, 0.5234083862304687, 0.5233295288085937, 0.523884521484375, 0.5232977905273437, 0.5238272094726563, 0.5236193237304687, 0.5233909912109375, 0.5233141479492187, 0.5239879760742188, 0.5235762939453125, 0.5241558837890625, 0.5236766967773437, 0.5239725952148437, 0.5236613159179687, 0.52408935546875, 0.5242327270507813, 0.5238917236328124, 0.5234862060546875, 0.5241200561523438, 0.5234708251953125, 0.523630615234375, 0.5242705688476562, 0.5235712280273438, 0.5233428344726563, 0.52341455078125, 0.5233182983398438, 0.52377294921875, 0.5237073974609375, 0.5236223754882813, 0.5232424926757813, 0.5235609741210937, 0.523462646484375, 0.523536376953125, 0.5239029541015625, 1.092758544921875, 0.5232230224609375, 0.5236244506835938, 0.5231585083007813, 0.5233715209960937, 0.5235814208984375, 0.5233612670898438, 0.523536376953125, 0.523325439453125, 0.5234390869140625, 0.52335205078125, 0.52364697265625, 0.5233899536132812, 0.5232977905273437, 0.5235599365234375, 0.5235517578125, 0.5233920288085937, 0.52366845703125, 0.5234319458007812, 0.523514892578125, 0.5232752685546875, 0.5237012329101562, 0.5237196655273437, 0.5233766479492188, 0.5236971435546875, 0.52352099609375, 0.5235537719726563, 0.523345947265625, 0.5235455932617188, 0.52406884765625, 0.5242286376953125, 0.524389404296875, 0.5235241088867187, 0.5232578735351563, 0.5233694458007813, 0.523747314453125, 0.5242389526367187, 0.523600830078125, 0.5237022705078125, 0.5236387939453125, 0.5238128662109375, 0.5234390869140625, 0.5237319946289063, 0.5238507690429688, 0.5233745727539062, 0.5234442138671875, 0.5238927612304688, 0.523767822265625, 0.523869140625, 0.523968505859375, 0.5244630737304687, 0.5236377563476563, 0.52392138671875, 0.5233858642578125, 0.5234381103515625, 0.5237340087890625, 0.5240494384765625, 0.52394189453125, 0.5239357299804688, 0.5239705810546875, 0.5242203979492187, 0.5239931030273437, 0.5242460327148437, 1.093507080078125, 
0.523989013671875, 0.5240360717773438, 0.5239337158203125, 0.5234032592773438, 0.5238927612304688, 0.5234606323242188, 0.5235845336914062, 0.5233848266601563, 0.5233233642578125, 0.5231104125976562, 0.5241026611328125, 0.5236070556640625, 0.523736083984375, 0.523272216796875, 0.5247611083984375, 0.5243330688476563, 0.5240186767578126, 0.524157958984375, 0.5240443115234374, 0.523978759765625, 0.5238374633789062, 0.5240657958984375, 0.5238599853515625, 0.5237462768554687, 0.5236694946289062, 0.523599853515625, 0.52335205078125, 0.5244129028320312, 0.5246812133789063, 0.524706787109375, 0.524564453125, 0.5243494262695313, 0.524263427734375, 0.5238323364257812, 0.523388916015625, 0.5234790649414063, 0.5235701904296876, 0.5234329833984375, 0.52348828125, 0.5235302124023438, 0.5233008422851563, 0.5235025634765625, 0.52366748046875, 0.523378662109375, 0.5235241088867187, 0.523799560546875, 0.5239378051757813, 0.5237391357421874, 0.5240770263671874, 0.5232752685546875, 0.5233377075195312, 0.5234656982421875, 0.5238057250976562, 0.5232496337890625, 0.5235005493164062, 0.52366845703125, 0.52352099609375, 0.5235978393554688, 0.5234442138671875, 0.5232630004882812, 0.523420654296875, 0.5245173950195312, 1.092947998046875, 0.52453271484375, 0.5243463745117187, 0.5242276000976562, 0.5244682006835938, 0.524368896484375, 0.5243944702148438, 0.5240617065429688, 0.5243750610351563, 0.5239592895507813, 0.5239234619140625, 0.52438525390625, 0.5242869873046875, 0.5240463256835938, 0.5240596313476562, 0.524732421875, 0.5245419311523437, 0.5243187255859375, 0.5240545043945313, 0.524000244140625, 0.5237412109375, 0.5240115356445313, 0.5239132080078125, 0.5242838745117188, 0.5240617065429688, 0.524099609375, 0.5238660888671876, 0.5240678100585937, 0.5237862548828125, 0.5238876342773438, 0.5237872924804687, 0.5241005859375, 0.5238323364257812, 0.523931640625, 0.5241927490234375, 0.5244938354492188, 0.5244067993164062, 0.5234135131835937, 0.523315185546875, 0.52359375, 0.5232455444335937, 0.5234421997070312, 0.5235281982421875, 0.5233162231445313, 0.523388916015625, 0.523452392578125, 0.5235916748046875, 0.5241804809570313, 0.5237412109375, 0.52387939453125, 0.5238538208007812, 0.5236848754882812, 0.523737060546875, 0.5239080810546874, 0.5241026611328125, 0.5244927978515626, 0.5238046875, 0.5238814697265625, 0.5236787109375, 0.5235701904296876, 0.5235640258789063, 0.524210205078125, 0.5235947265625]",tokens/s,1.8800948535125506,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-30b,huggyllama/llama-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,6416.572416,20902.838272,0.0,20256.391168,19273.842688,s,10,27.008011230468746,2.700801123046875,0.0043850724799974665,2.700565551757813,2.7060529296875,2.7063339599609373,2.7065587841796876,"[2.703509521484375, 2.7029794921875, 2.69485400390625, 2.69747705078125, 2.696227783203125, 2.69636279296875, 2.698151611328125, 2.705843505859375, 2.706614990234375, 
2.705990478515625]",tokens/s,94.7866904436106,kWh,3.1842278407679666e-05,1.7448346880028112e-05,0.00015081939843319958,0.00020011002372090735,tokens/kWh,1279296.2353402255,MB,6420.922368,20902.838272,0.0,20256.391168,19862.693376,s,10,1586.4640625,158.64640624999998,0.022306639855444715,158.64365625,158.67640156250002,158.68144296875,158.68547609375,"[158.63340625, 158.605671875, 158.67528125, 158.686484375, 158.659984375, 158.625515625, 158.642296875, 158.6395, 158.65090625, 158.645015625]",tokens/s,0.3971095311211943,kWh,0.0018735859500534004,0.0010268917923910701,0.0086674246283786,0.011567902370823072,tokens/kWh,5446.104054171531,,s,629,1608.2408713378911,2.556821735036392,0.3208393016972219,2.518036376953125,2.5194736328125003,2.520285693359375,5.21544693359375,"[2.518319091796875, 2.519310302734375, 2.51766064453125, 2.517339111328125, 2.51743017578125, 2.51803955078125, 2.517208984375, 2.517015625, 2.516982666015625, 2.5182587890625, 2.51766064453125, 2.517487548828125, 2.5175009765625, 2.51837646484375, 2.5176474609375, 2.517837890625, 2.517665771484375, 2.51789208984375, 2.517560302734375, 2.517328857421875, 2.517376953125, 2.51846044921875, 2.517990478515625, 2.517222412109375, 2.517308349609375, 2.518451171875, 2.5171455078125, 2.518161376953125, 2.51881884765625, 2.518177734375, 2.517626953125, 2.5173310546875, 2.51724072265625, 2.517538818359375, 2.51719384765625, 2.5173740234375, 2.517359619140625, 2.518120361328125, 2.517709716796875, 2.51768310546875, 2.51764013671875, 2.51835400390625, 2.51860791015625, 2.51725927734375, 2.518401123046875, 2.5179462890625, 2.518530029296875, 2.51753466796875, 2.517834716796875, 2.518044677734375, 2.517002197265625, 2.5178828125, 2.51770361328125, 2.5193173828125, 2.519462890625, 2.51964306640625, 2.51835595703125, 2.51885986328125, 2.51799560546875, 2.51936767578125, 2.519690185546875, 2.5200732421875, 5.22134814453125, 2.517864501953125, 2.51825244140625, 2.51934716796875, 2.5177138671875, 2.5171435546875, 2.517161865234375, 2.51721826171875, 2.51709326171875, 2.517622802734375, 2.518835205078125, 2.51759912109375, 2.517168212890625, 2.517455810546875, 2.518162353515625, 2.517591064453125, 2.51749365234375, 2.517919677734375, 2.517905517578125, 2.51747216796875, 2.51761669921875, 2.517444580078125, 2.51732373046875, 2.516890625, 2.517098388671875, 2.517686279296875, 2.517358642578125, 2.516806640625, 2.516729736328125, 2.51791259765625, 2.517181396484375, 2.516504638671875, 2.5171435546875, 2.517117919921875, 2.517271484375, 2.516725830078125, 2.5167197265625, 2.5176513671875, 2.517125, 2.51666015625, 2.51749365234375, 2.518317138671875, 2.5185751953125, 2.51751025390625, 2.517790771484375, 2.517456787109375, 2.5185126953125, 2.517084228515625, 2.517465087890625, 2.51748974609375, 2.517200927734375, 2.51684033203125, 2.51730126953125, 2.518183837890625, 2.518331298828125, 2.51782861328125, 2.51742919921875, 2.51755615234375, 2.517507080078125, 2.517507080078125, 2.517161865234375, 2.517621826171875, 2.517966796875, 5.21453759765625, 2.5204541015625, 2.52109814453125, 2.519462890625, 2.518677490234375, 2.519301025390625, 2.518960205078125, 2.516992919921875, 2.517800048828125, 2.517873779296875, 2.51792578125, 2.517770263671875, 2.5189140625, 2.517824462890625, 2.517718994140625, 2.518151123046875, 2.51847998046875, 2.518257568359375, 2.518415283203125, 2.51804052734375, 2.518096923828125, 2.5183427734375, 2.518391845703125, 2.517865478515625, 2.519243896484375, 2.51814501953125, 2.518434814453125, 2.518415283203125, 2.518769775390625, 
2.519129150390625, 2.518907958984375, 2.519370849609375, 2.5190185546875, 2.518751220703125, 2.52101416015625, 2.518179931640625, 2.519162841796875, 2.518454345703125, 2.519679931640625, 2.5194833984375, 2.5204951171875, 2.518433837890625, 2.51841650390625, 2.5182412109375, 2.519734375, 2.519425048828125, 2.518444091796875, 2.51785205078125, 2.518681640625, 2.518297607421875, 2.518287353515625, 2.51755517578125, 2.519033935546875, 2.518720458984375, 2.5197587890625, 2.519648193359375, 2.518752197265625, 2.51768212890625, 2.51812255859375, 2.51717626953125, 2.51780908203125, 2.517684326171875, 2.5175439453125, 5.2187802734375, 2.519068603515625, 2.518744140625, 2.51738525390625, 2.518098876953125, 2.51873681640625, 2.519017578125, 2.51888427734375, 2.519688232421875, 2.52031396484375, 2.518892578125, 2.519817138671875, 2.5199248046875, 2.51848388671875, 2.518221923828125, 2.517916748046875, 2.519314453125, 2.518415283203125, 2.517685302734375, 2.51694287109375, 2.51803857421875, 2.52027587890625, 2.520306640625, 2.518614013671875, 2.520572998046875, 2.517498779296875, 2.517350341796875, 2.5175869140625, 2.51862841796875, 2.517562255859375, 2.51831201171875, 2.51706982421875, 2.518307861328125, 2.517770263671875, 2.518127685546875, 2.517791748046875, 2.51879833984375, 2.51784912109375, 2.518008056640625, 2.5185615234375, 2.518908935546875, 2.5175224609375, 2.517482421875, 2.517506103515625, 2.5204541015625, 2.518011962890625, 2.521114501953125, 2.5210244140625, 2.520853515625, 2.52010693359375, 2.52200244140625, 2.52052880859375, 2.52000048828125, 2.51896728515625, 2.519782470703125, 2.51968603515625, 2.51883935546875, 2.51808251953125, 2.51833349609375, 2.517741455078125, 2.51945068359375, 2.518738037109375, 2.51962060546875, 5.21556396484375, 2.51751025390625, 2.517732421875, 2.51755712890625, 2.51782666015625, 2.51806005859375, 2.518541259765625, 2.517211181640625, 2.51755322265625, 2.516893798828125, 2.517453857421875, 2.517432373046875, 2.517993408203125, 2.517221435546875, 2.517614501953125, 2.518161376953125, 2.5184521484375, 2.518592529296875, 2.517885986328125, 2.517688232421875, 2.51909326171875, 2.51841015625, 2.518773681640625, 2.518697998046875, 2.51839892578125, 2.518539306640625, 2.51853515625, 2.51841845703125, 2.51909326171875, 2.5179013671875, 2.5176484375, 2.5175888671875, 2.51825146484375, 2.518151123046875, 2.51841015625, 2.518299560546875, 2.518412353515625, 2.518192138671875, 2.5184072265625, 2.518162353515625, 2.519458740234375, 2.518036376953125, 2.517877685546875, 2.518980712890625, 2.520924072265625, 2.518964111328125, 2.520530029296875, 2.520292236328125, 2.5202412109375, 2.519448486328125, 2.519780517578125, 2.519730224609375, 2.5202646484375, 2.5176298828125, 2.51782763671875, 2.51780908203125, 2.51826171875, 2.51890185546875, 2.5179423828125, 2.518106201171875, 2.518285400390625, 2.5179658203125, 2.518643798828125, 5.21514599609375, 2.51869384765625, 2.518576171875, 2.51751318359375, 2.5187890625, 2.51831298828125, 2.51842138671875, 2.518813720703125, 2.51734619140625, 2.51774267578125, 2.51757568359375, 2.51732177734375, 2.51702783203125, 2.516744140625, 2.5167646484375, 2.517622802734375, 2.516887451171875, 2.5171630859375, 2.517233642578125, 2.517984375, 2.517812255859375, 2.517107666015625, 2.517927978515625, 2.5184912109375, 2.51740478515625, 2.517085205078125, 2.517383056640625, 2.518287353515625, 2.518308837890625, 2.51829248046875, 2.517562255859375, 2.51860888671875, 2.517875732421875, 2.517098388671875, 2.517589111328125, 2.518604736328125, 
2.517823486328125, 2.517308349609375, 2.518150146484375, 2.518289306640625, 2.51751220703125, 2.51789208984375, 2.518724609375, 2.5184931640625, 2.5175673828125, 2.51732373046875, 2.517525390625, 2.5192119140625, 2.51820849609375, 2.519125, 2.518769775390625, 2.5183896484375, 2.5181767578125, 2.51797705078125, 2.51743017578125, 2.517621826171875, 2.517073974609375, 2.517191650390625, 2.51754296875, 2.517603271484375, 2.517738525390625, 2.51803857421875, 2.518088623046875, 5.21595068359375, 2.518022216796875, 2.518259765625, 2.517396484375, 2.517515380859375, 2.5171640625, 2.51678515625, 2.51755517578125, 2.51660693359375, 2.51696337890625, 2.5178798828125, 2.51924169921875, 2.518275146484375, 2.516697021484375, 2.51698486328125, 2.517755859375, 2.51724072265625, 2.518010986328125, 2.517244873046875, 2.5173779296875, 2.517291015625, 2.517590087890625, 2.518003662109375, 2.518558837890625, 2.519623779296875, 2.519237548828125, 2.520456298828125, 2.51768115234375, 2.518234130859375, 2.519458740234375, 2.52040283203125, 2.519605224609375, 2.518929443359375, 2.520151123046875, 2.51915869140625, 2.519528564453125, 2.51947119140625, 2.5201142578125, 2.519734375, 2.518724609375, 2.51780517578125, 2.51755224609375, 2.51803857421875, 2.5180732421875, 2.517747802734375, 2.517359619140625, 2.518424560546875, 2.51807421875, 2.518046630859375, 2.51700439453125, 2.51778759765625, 2.517309326171875, 2.51774365234375, 2.51673291015625, 2.517708740234375, 2.517607421875, 2.518066162109375, 2.51755517578125, 2.51816845703125, 2.518012939453125, 2.518370361328125, 2.51797412109375, 2.51844189453125, 5.22497119140625, 2.517222412109375, 2.51845947265625, 2.51675537109375, 2.517399658203125, 2.517277587890625, 2.518365234375, 2.517622802734375, 2.5181572265625, 2.517368896484375, 2.51894580078125, 2.51837548828125, 2.51816845703125, 2.517992431640625, 2.517802001953125, 2.5172060546875, 2.5187685546875, 2.517329833984375, 2.519754638671875, 2.5192744140625, 2.51748046875, 2.516811767578125, 2.517927001953125, 2.51881884765625, 2.5180302734375, 2.516874267578125, 2.518129638671875, 2.517221435546875, 2.517310546875, 2.517367919921875, 2.517562255859375, 2.516991943359375, 2.518096923828125, 2.516927490234375, 2.5179677734375, 2.517698486328125, 2.51718359375, 2.517350341796875, 2.51825244140625, 2.517243896484375, 2.5187900390625, 2.52037109375, 2.519783447265625, 2.51812255859375, 2.51873291015625, 2.5181962890625, 2.5185771484375, 2.518066162109375, 2.518571044921875, 2.51795361328125, 2.51880029296875, 2.5177548828125, 2.518213623046875, 2.51785009765625, 2.519160888671875, 2.518510498046875, 2.51831201171875, 2.51875830078125, 2.51904931640625, 2.51761865234375, 2.5184912109375, 2.51787158203125, 2.519330810546875, 5.22598291015625, 2.518825927734375, 2.518971435546875, 2.51806103515625, 2.518127685546875, 2.518425537109375, 2.51801806640625, 2.516579345703125, 2.51692041015625, 2.517222412109375, 2.517351318359375, 2.517392333984375, 2.51871630859375, 2.518453369140625, 2.518960205078125, 2.51763720703125, 2.517210205078125, 2.517708740234375, 2.517718017578125, 2.517530517578125, 2.51915478515625, 2.518562744140625, 2.51778759765625, 2.517306396484375, 2.5181123046875, 2.518414306640625, 2.520138671875, 2.517626953125, 2.518436767578125, 2.518436767578125, 2.518345703125, 2.517001220703125, 2.51755615234375, 2.518391845703125, 2.518742919921875, 2.51820751953125, 2.517982177734375, 2.52077978515625, 2.5205986328125, 2.51875634765625, 2.518046630859375, 2.5188701171875, 2.5200302734375, 2.517751708984375, 
2.51822998046875, 2.51795458984375, 2.518422607421875, 2.520731689453125, 2.5178388671875, 2.518604736328125, 2.518803466796875, 2.517551025390625, 2.517665771484375, 2.518455322265625, 2.5181337890625, 2.516989013671875, 2.517697509765625, 2.518129638671875, 2.517895263671875, 2.517632080078125, 2.51766162109375, 2.518906982421875, 2.519458740234375, 5.22549267578125, 2.51939111328125, 2.518699951171875, 2.51726025390625, 2.517559326171875, 2.51808349609375, 2.5171669921875, 2.51778857421875, 2.517769287109375, 2.5189755859375, 2.520603759765625, 2.517000244140625, 2.5173525390625, 2.517968994140625, 2.517775390625, 2.517918701171875, 2.517655517578125, 2.51848193359375, 2.517267333984375, 2.517482421875, 2.51776708984375, 2.5178828125, 2.5175009765625, 2.517536865234375, 2.518390869140625, 2.51835498046875, 2.5190810546875, 2.519309326171875, 2.5178818359375, 2.51835791015625, 2.51839697265625, 2.51793505859375, 2.517909423828125, 2.517781494140625, 2.517467041015625, 2.517106689453125, 2.51734228515625, 2.518115234375, 2.518010986328125, 2.517520263671875, 2.51814404296875, 2.517705810546875, 2.517486572265625, 2.517609375, 2.51786865234375, 2.518467529296875, 2.51749169921875, 2.517338134765625, 2.51806005859375, 2.5187646484375, 2.518148193359375, 2.51829248046875, 2.51822705078125, 2.518614990234375, 2.518023193359375, 2.517477294921875, 2.51810107421875, 2.518518798828125, 2.517971923828125, 2.517927978515625, 2.518328369140625, 2.5179638671875, 2.51816455078125]",tokens/s,0.39111056758353424,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,togethercomputer/RedPajama-INCITE-Base-3B-v1,togethercomputer/RedPajama-INCITE-Base-3B-v1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.1,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2276.958208,3364.356096,0.0,2717.908992,2483.645952,s,10,2.418925704956055,0.24189257049560547,0.0014063564969548042,0.2416701126098633,0.24295570678710937,0.24411388549804686,0.24504042846679686,"[0.24527206420898437, 0.24195974731445313, 0.2402183074951172, 0.2404288330078125, 0.24130831909179687, 0.2407073974609375, 0.24138047790527345, 0.24269833374023436, 0.24266143798828124, 0.24229078674316407]",tokens/s,1058.3210533316103,kWh,2.845328939812524e-06,1.5591126709426003e-06,1.357947514928401e-05,1.7983916760039136e-05,tokens/kWh,14234941.332070695,MB,2281.037824,3364.356096,0.0,2717.908992,2632.492032,s,10,138.94498046874997,13.894498046874997,0.004182595458186898,13.892857421875,13.8990578125,13.90156796875,13.903576093749999,"[13.894330078125, 13.904078125, 13.891740234375, 13.8913271484375, 13.8928662109375, 13.8928486328125, 13.890017578125, 13.8911748046875, 13.8985, 13.89809765625]",tokens/s,4.534168833408796,kWh,0.00016402044498967748,8.989529760271189e-05,0.0007398838220653231,0.0009937995646577126,tokens/kWh,63393.06459819053,,s,629,140.89449887084956,0.22399761346716945,0.028708018380316665,0.22048768615722655,0.22092697143554688,0.2211510284423828,0.4614811267089844,"[0.22103858947753907, 0.22030642700195313, 0.2203074493408203, 0.2203627471923828, 0.22044467163085937, 
0.2201466827392578, 0.22047232055664062, 0.2204610595703125, 0.22034637451171876, 0.22052249145507813, 0.22052249145507813, 0.2210744323730469, 0.22037094116210937, 0.22095564270019533, 0.2205102081298828, 0.220400634765625, 0.2204180450439453, 0.22016717529296875, 0.22055328369140625, 0.22030943298339845, 0.22044979858398436, 0.2203740234375, 0.22033509826660155, 0.22029107666015624, 0.22042008972167967, 0.220221435546875, 0.22049996948242187, 0.22039552307128907, 0.2202757110595703, 0.2207139892578125, 0.22043545532226563, 0.2203248596191406, 0.22038528442382813, 0.22035661315917968, 0.22044058227539062, 0.2203135986328125, 0.2203863067626953, 0.22037504577636718, 0.22030848693847657, 0.22094744873046876, 0.22068121337890625, 0.22063002014160157, 0.2204917755126953, 0.22049693298339842, 0.22050607299804686, 0.22025112915039063, 0.2205347900390625, 0.22040882873535156, 0.2204559326171875, 0.22036376953125, 0.22044160461425782, 0.22035763549804688, 0.22027468872070313, 0.2205716552734375, 0.22072114562988282, 0.22048255920410156, 0.22129356384277343, 0.22104063415527345, 0.22131712341308593, 0.22103141784667968, 0.22057676696777342, 0.22042828369140624, 0.4625459289550781, 0.22034739685058594, 0.22049996948242187, 0.22023680114746094, 0.22024295043945313, 0.2206351318359375, 0.22068838500976562, 0.220474365234375, 0.22047129821777345, 0.22064947509765626, 0.22041087341308593, 0.22091673278808593, 0.22036787414550782, 0.22061465454101561, 0.22062591552734376, 0.22061363220214844, 0.2205153350830078, 0.2204753875732422, 0.22044979858398436, 0.22052864074707032, 0.22069862365722656, 0.22137957763671876, 0.22033920288085937, 0.22062387084960938, 0.220695556640625, 0.22112973022460938, 0.22057472229003905, 0.2208757781982422, 0.22068634033203124, 0.22061567687988282, 0.220548095703125, 0.22069247436523437, 0.2205368347167969, 0.22067916870117188, 0.22079180908203125, 0.22078463745117188, 0.22062591552734376, 0.2209423370361328, 0.2213263397216797, 0.22097305297851563, 0.22086349487304688, 0.22097613525390625, 0.22042726135253907, 0.2206965789794922, 0.22049996948242187, 0.22054502868652343, 0.22163456726074218, 0.22095359802246095, 0.22068019104003905, 0.2211655731201172, 0.2206842803955078, 0.22073651123046875, 0.22048973083496093, 0.22058706665039063, 0.22057977294921874, 0.22068844604492188, 0.22081939697265626, 0.2213744659423828, 0.2205665283203125, 0.22082252502441407, 0.2207262725830078, 0.22075187683105468, 0.22038937377929688, 0.4611839904785156, 0.22023577880859374, 0.22089727783203125, 0.22050405883789062, 0.22021734619140626, 0.22054400634765625, 0.22038528442382813, 0.2205655059814453, 0.2203248596191406, 0.22023884582519532, 0.22023583984375, 0.22047123718261719, 0.22023577880859374, 0.22024191284179687, 0.2201917419433594, 0.22043443298339843, 0.22027162170410156, 0.2202449951171875, 0.22028492736816407, 0.22072422790527343, 0.220980224609375, 0.2206771240234375, 0.22129458618164063, 0.22060134887695312, 0.2206730194091797, 0.2208123779296875, 0.22065858459472656, 0.22089830017089843, 0.22067507934570313, 0.2204436492919922, 0.22042930603027344, 0.22058087158203124, 0.22041497802734375, 0.22024908447265626, 0.22027162170410156, 0.22030540466308593, 0.22031564331054687, 0.220368896484375, 0.22108876037597655, 0.22054092407226564, 0.22035353088378906, 0.22040576171875, 0.22026138305664061, 0.22044467163085937, 0.2204559326171875, 0.22032179260253906, 0.22044773864746095, 0.22073036193847656, 0.22113075256347656, 0.22058393859863282, 0.22032691955566405, 0.22068531799316407, 
0.22055833435058594, 0.22046412658691406, 0.22032383728027344, 0.22034637451171876, 0.22055322265625, 0.22052455139160157, 0.22036480712890624, 0.2204047393798828, 0.22033612060546875, 0.2206771240234375, 0.2206044158935547, 0.4615966796875, 0.22076824951171875, 0.220295166015625, 0.22087271118164062, 0.2203504638671875, 0.22047232055664062, 0.220189697265625, 0.22043238830566406, 0.22019378662109376, 0.22050816345214844, 0.220579833984375, 0.22056346130371093, 0.22019276428222656, 0.2204375, 0.22046617126464843, 0.2205716552734375, 0.22037196350097657, 0.22026649475097657, 0.22035661315917968, 0.22112562561035157, 0.22030950927734375, 0.22041293334960937, 0.2203873291015625, 0.22026853942871094, 0.22054502868652343, 0.22050201416015625, 0.22024806213378906, 0.2203146209716797, 0.22042930603027344, 0.22102117919921874, 0.22032896423339843, 0.22048768615722655, 0.22014976501464845, 0.22047232055664062, 0.22042112731933594, 0.2204569549560547, 0.2205122528076172, 0.22034226989746095, 0.22057778930664063, 0.22053375244140624, 0.220442626953125, 0.22042214965820311, 0.22033203125, 0.22048664855957031, 0.22074981689453124, 0.2204917755126953, 0.22120652770996094, 0.2206904296875, 0.22041497802734375, 0.22121676635742188, 0.2204784698486328, 0.2206033935546875, 0.22039450073242187, 0.22054400634765625, 0.22060850524902345, 0.22071705627441407, 0.22043443298339843, 0.22050816345214844, 0.2202931213378906, 0.2204794921875, 0.22053887939453126, 0.2205982666015625, 0.22036787414550782, 0.46206668090820313, 0.22080613708496094, 0.2205716552734375, 0.22023782348632812, 0.22025421142578125, 0.22019590759277344, 0.22057875061035156, 0.22045901489257813, 0.22017330932617188, 0.22030950927734375, 0.2208921661376953, 0.22050201416015625, 0.22088294982910156, 0.2204538879394531, 0.2203197479248047, 0.22046310424804688, 0.22013133239746094, 0.22019071960449219, 0.2202828826904297, 0.22072320556640626, 0.22030438232421876, 0.2206730194091797, 0.2204559326171875, 0.22029209899902344, 0.22052659606933595, 0.22039657592773437, 0.22047030639648438, 0.22110202026367187, 0.22078463745117188, 0.22039654541015624, 0.22048768615722655, 0.22048153686523436, 0.22034637451171876, 0.22068325805664063, 0.22042623901367187, 0.22046208190917968, 0.2204989471435547, 0.22047129821777345, 0.22049690246582032, 0.22055526733398437, 0.22030848693847657, 0.22025625610351562, 0.2204047393798828, 0.2205102081298828, 0.22039756774902344, 0.22056448364257814, 0.22109797668457032, 0.2207406005859375, 0.22061260986328124, 0.220548095703125, 0.2202931213378906, 0.22047334289550782, 0.22042828369140624, 0.22047334289550782, 0.22030032348632814, 0.22054191589355468, 0.22119935607910157, 0.22169293212890626, 0.2205419464111328, 0.220557373046875, 0.2205152587890625, 0.2205655059814453, 0.22039756774902344, 0.46115838623046873, 0.22026853942871094, 0.22028492736816407, 0.22010981750488282, 0.22020608520507812, 0.22032077026367186, 0.22018765258789064, 0.2204375, 0.2203811798095703, 0.22077133178710937, 0.22063206481933595, 0.22066073608398437, 0.22049075317382813, 0.22043034362792968, 0.22024191284179687, 0.22040882873535156, 0.22035865783691405, 0.2205982666015625, 0.22046514892578126, 0.220400634765625, 0.22039039611816405, 0.2204047393798828, 0.22018048095703124, 0.22042726135253907, 0.22025625610351562, 0.2204016571044922, 0.2204917755126953, 0.22091775512695314, 0.22068736267089845, 0.2210478057861328, 0.22050714111328126, 0.22068022155761718, 0.220601318359375, 0.2203811798095703, 0.22034022521972657, 0.22041497802734375, 
0.22021632385253906, 0.22114303588867187, 0.22130073547363283, 0.22058905029296874, 0.22039961242675782, 0.22047027587890625, 0.22025830078125, 0.22062284851074218, 0.2205102081298828, 0.22040780639648438, 0.22047232055664062, 0.22069349670410157, 0.22081024169921876, 0.22071807861328124, 0.22037811279296876, 0.22054502868652343, 0.2205306854248047, 0.22061567687988282, 0.22122189331054687, 0.22058087158203124, 0.22052557373046874, 0.2210048065185547, 0.22053785705566406, 0.22060646057128908, 0.22035865783691405, 0.22059622192382813, 0.220516357421875, 0.46187826538085935, 0.22027877807617188, 0.2201661376953125, 0.22008934020996093, 0.22014874267578124, 0.22040780639648438, 0.220221435546875, 0.22034739685058594, 0.22072422790527343, 0.22045184326171874, 0.2204436798095703, 0.22067196655273438, 0.2206525421142578, 0.22057676696777342, 0.22060032653808595, 0.22078976440429687, 0.22035661315917968, 0.2203740234375, 0.22085427856445314, 0.2204600372314453, 0.220337158203125, 0.22026240539550782, 0.22036480712890624, 0.22081741333007812, 0.22036172485351563, 0.2203177032470703, 0.22025421142578125, 0.22035763549804688, 0.22030335998535155, 0.2207139892578125, 0.22059213256835938, 0.22072525024414064, 0.2207406005859375, 0.22064537048339844, 0.22042008972167967, 0.22125669860839844, 0.22061567687988282, 0.220474365234375, 0.22071807861328124, 0.2205347900390625, 0.2203627471923828, 0.2203146209716797, 0.2203248596191406, 0.22029823303222656, 0.22036376953125, 0.22047232055664062, 0.2202255401611328, 0.22048664855957031, 0.22040576171875, 0.22032179260253906, 0.22034025573730467, 0.2205460205078125, 0.22030950927734375, 0.22042317199707032, 0.22106214904785157, 0.22057267761230467, 0.220516357421875, 0.22052761840820312, 0.2204600372314453, 0.22071705627441407, 0.22034124755859374, 0.2203822021484375, 0.22037196350097657, 0.4634490966796875, 0.22028901672363282, 0.22034124755859374, 0.22013848876953124, 0.22024806213378906, 0.22021836853027343, 0.22028901672363282, 0.22026860046386718, 0.22030227661132812, 0.22021530151367188, 0.22014463806152343, 0.22054911804199218, 0.22034431457519532, 0.2202439727783203, 0.22034226989746095, 0.22027775573730468, 0.22059519958496093, 0.22043852233886718, 0.22060032653808595, 0.2206351318359375, 0.2210744323730469, 0.22051123046875, 0.22054092407226564, 0.2203863067626953, 0.22021427917480468, 0.22092697143554688, 0.22021324157714844, 0.22043136596679688, 0.2202408905029297, 0.22046208190917968, 0.2203074493408203, 0.2203504638671875, 0.2204190673828125, 0.2203514862060547, 0.2202593231201172, 0.2208204803466797, 0.2203146209716797, 0.22043034362792968, 0.22032896423339843, 0.22037196350097657, 0.22022860717773438, 0.22044058227539062, 0.22041293334960937, 0.2202449951171875, 0.22045082092285156, 0.22114303588867187, 0.22066893005371094, 0.2209300537109375, 0.22127410888671875, 0.22070477294921875, 0.2205368347167969, 0.22061158752441407, 0.2204927978515625, 0.22057676696777342, 0.2206525421142578, 0.22057066345214843, 0.22053884887695313, 0.22044467163085937, 0.22082867431640624, 0.22062899780273437, 0.22075289916992188, 0.2207139892578125, 0.2211584014892578, 0.4632709045410156, 0.22081024169921876, 0.22034022521972657, 0.22049996948242187, 0.2202265625, 0.22050099182128907, 0.2203822021484375, 0.2204600372314453, 0.22082150268554687, 0.2206904296875, 0.22041087341308593, 0.22061567687988282, 0.22044979858398436, 0.22075698852539063, 0.22043341064453126, 0.22061465454101561, 0.2210682830810547, 0.22072218322753906, 0.2204805145263672, 0.2206699523925781, 
0.22046514892578126, 0.22043238830566406, 0.2206177215576172, 0.22051840209960938, 0.22042726135253907, 0.22032383728027344, 0.22085324096679687, 0.22037709045410156, 0.22035661315917968, 0.22064743041992188, 0.22058802795410157, 0.22054296875, 0.22046310424804688, 0.22043238830566406, 0.2203453369140625, 0.22068736267089845, 0.2204180450439453, 0.22066175842285157, 0.22066073608398437, 0.22049484252929688, 0.22063923645019531, 0.22058700561523437, 0.22040985107421876, 0.22107341003417968, 0.22086553955078125, 0.22104371643066406, 0.22082867431640624, 0.22134066772460936, 0.2208573455810547, 0.22092697143554688, 0.22046310424804688, 0.2206208038330078, 0.22063002014160157, 0.22059111022949218, 0.22057061767578126, 0.22050918579101564, 0.22044671630859375, 0.220727294921875, 0.22051840209960938, 0.22058802795410157, 0.22056243896484376, 0.22115635681152343, 0.22068940734863282, 0.46320025634765627, 0.22056346130371093, 0.22062899780273437, 0.22051840209960938, 0.22040882873535156, 0.22072012329101562, 0.22032691955566405, 0.2210713653564453, 0.22036070251464843, 0.220400634765625, 0.22021324157714844, 0.22049075317382813, 0.22033509826660155, 0.22090547180175782, 0.22041293334960937, 0.22086656188964843, 0.22052761840820312, 0.2203197479248047, 0.22071296691894532, 0.22047232055664062, 0.22075698852539063, 0.22052659606933595, 0.22043238830566406, 0.22035968017578125, 0.22096896362304688, 0.22059928894042968, 0.2214615020751953, 0.2207467498779297, 0.22179942321777343, 0.22081228637695313, 0.22052146911621093, 0.22039961242675782, 0.22029209899902344, 0.2204805145263672, 0.22017433166503905, 0.2204436492919922, 0.2202204132080078, 0.22042930603027344, 0.2203811798095703, 0.22039654541015624, 0.2202408905029297, 0.22037196350097657, 0.22073855590820313, 0.22145228576660156, 0.22041395568847658, 0.22063308715820312, 0.220368896484375, 0.22088499450683594, 0.22040882873535156, 0.22045901489257813, 0.22026649475097657, 0.22042726135253907, 0.22040985107421876, 0.22130482482910158, 0.22058700561523437, 0.22063002014160157, 0.22062693786621093, 0.22053887939453126, 0.22047640991210937, 0.22061158752441407, 0.2204375, 0.22050611877441406, 0.22054911804199218]",tokens/s,4.464333278026493,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,mistralai/Mixtral-8x7B-v0.1,mistralai/Mixtral-8x7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,r,r,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/r/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a3017-7b7c3b6c1514ca223738b4f9;91ce70ea-bd83-42c7-b07c-f8962244df2a) Repository Not Found for url: https://huggingface.co/r/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: r is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-12b,EleutherAI/pythia-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.0,,,,,,MB,1585.569792,9676.783616,0.0,9030.336512,8583.573504,s,10,9.573078491210937,0.9573078491210938,0.0009060771854647526,0.9570158996582031,0.9584730651855469,0.958728140258789,0.9589322003173828,"[0.9589832153320312, 0.956114501953125, 0.95637109375, 0.9565009765625, 0.9567921142578125, 0.9570035400390625, 0.9570282592773437, 0.95793017578125, 0.957938232421875, 
0.9584163818359375]",tokens/s,267.41658938139295,kWh,1.1295311258296775e-05,6.189367993647465e-06,5.412797259527177e-05,7.161265184721602e-05,tokens/kWh,3574787.323141869,MB,1585.569792,9676.783616,0.0,9030.336512,8872.967168,s,10,567.96033984375,56.796033984375,0.005905783414625482,56.796150390625,56.802221875,56.803097265625,56.803797578125,"[56.78812109375, 56.796453125, 56.795734375, 56.80109765625, 56.800734375, 56.79584765625, 56.80202734375, 56.80397265625, 56.7909765625, 56.785375]",tokens/s,1.1092323808618707,kWh,0.0006704276505549146,0.00036745356669733154,0.0031837375343617275,0.004221618751613973,tokens/kWh,14923.185561442368,,s,629,575.6730700073241,0.9152195071658572,0.113749717069217,0.901496826171875,0.9019905639648437,0.9021755371093749,1.8586116552734375,"[0.9012899780273438, 0.9015009155273438, 0.9013851928710938, 0.9014036254882812, 0.9017272338867187, 0.9014722290039062, 0.9017364501953125, 0.9016832275390625, 0.901676025390625, 0.9014937744140625, 0.9013677978515625, 0.90115380859375, 0.9009315795898437, 0.9009326171875, 0.9006499633789062, 0.9008291625976562, 0.9008558349609375, 0.9008486328125, 0.9011026000976563, 0.9014824829101562, 0.901602294921875, 0.901296142578125, 0.9016278686523438, 0.9010196533203125, 0.901043212890625, 0.9009264526367188, 0.9010216674804687, 0.9008496704101563, 0.901128173828125, 0.9008875732421875, 0.901443603515625, 0.9013667602539063, 0.9012459716796875, 0.90106982421875, 0.9016514282226562, 0.9009028930664063, 0.9010933837890625, 0.9009028930664063, 0.9010616455078125, 0.9009520874023438, 0.901349365234375, 0.9014415283203125, 0.901707763671875, 0.9015367431640625, 0.9014681396484375, 0.9018613891601562, 0.901622802734375, 0.9018931274414063, 0.901707763671875, 0.9014057006835937, 0.901602294921875, 0.90210302734375, 0.9018941650390625, 0.9015654296875, 0.9017302856445313, 0.9016248168945312, 0.9020457153320313, 0.9013043212890625, 0.9014906616210937, 0.9013822021484375, 0.9013165283203125, 0.901349365234375, 1.86075439453125, 0.901201904296875, 0.9012777099609375, 0.9009407958984375, 0.9011107788085938, 0.9012059936523438, 0.901411865234375, 0.9011742553710937, 0.9014906616210937, 0.9008762817382813, 0.9011650390625, 0.9011548461914063, 0.9017159423828125, 0.9009899291992187, 0.9010339965820312, 0.9014087524414063, 0.901095458984375, 0.9012418212890625, 0.9011476440429688, 0.90099609375, 0.901496826171875, 0.9016145629882812, 0.9013197021484375, 0.9020405883789062, 0.9015439453125, 0.9012941284179687, 0.9013165893554688, 0.9014087524414063, 0.9009602661132813, 0.9014149169921875, 0.901086181640625, 0.901359619140625, 0.9010995483398437, 0.9015562133789062, 0.9015613403320313, 0.9021737060546875, 0.9016637573242188, 0.9020088500976563, 0.9014948120117188, 0.90166064453125, 0.901233642578125, 0.901607421875, 0.90140673828125, 0.9016708984375, 0.9019443359375, 0.9020333862304688, 0.90393701171875, 0.9017435913085937, 0.90208154296875, 0.9017467041015625, 0.901749755859375, 0.9017620239257812, 0.9014108276367188, 0.901612548828125, 0.9014630126953125, 0.901897216796875, 0.9016043701171875, 0.9021522216796874, 0.9015429077148438, 0.9017200927734375, 0.9018798217773437, 0.901327880859375, 0.9020098266601563, 1.8579189453125, 0.9009674072265625, 0.9019514770507813, 0.9013217163085937, 0.9013043212890625, 0.9010616455078125, 0.901369873046875, 0.9010882568359375, 0.9010647583007813, 0.9008700561523437, 0.901254150390625, 0.9011005249023437, 0.9012029418945312, 0.901781494140625, 0.9017036743164063, 0.9013206787109375, 0.901180419921875, 
0.9015664672851562, 0.9012275390625, 0.901060546875, 0.9013350219726562, 0.9012828369140625, 0.9011179809570312, 0.902530029296875, 0.901454833984375, 0.9017108764648437, 0.9014998779296876, 0.9013156127929688, 0.9013903198242188, 0.901664794921875, 0.9012408447265625, 0.9015879516601563, 0.9012008666992187, 0.9010565185546875, 0.9010083618164062, 0.9016893310546875, 0.9017681884765625, 0.9018941650390625, 0.9012459716796875, 0.9014087524414063, 0.9018050537109376, 0.9015992431640625, 0.9015572509765625, 0.9015726318359375, 0.9014558715820312, 0.9014138793945312, 0.901760009765625, 0.9032969970703125, 0.9014159545898438, 0.9015982055664062, 0.90126953125, 0.9023529052734375, 0.901634033203125, 0.9018787841796875, 0.9014353637695313, 0.9016453247070313, 0.9016279296875, 0.901738525390625, 0.9017559204101563, 0.902043701171875, 0.9015582275390625, 0.9019248657226563, 0.9020723266601562, 1.8581309814453124, 0.9010073852539062, 0.9013156127929688, 0.9008977661132812, 0.9008128051757812, 0.9016401977539062, 0.9013319702148438, 0.9011097412109375, 0.9010739135742187, 0.9013370971679687, 0.9015664672851562, 0.9017160034179688, 0.9023712768554687, 0.9015736083984375, 0.9013422241210938, 0.901080078125, 0.9018327026367188, 0.90148046875, 0.9011190185546875, 0.901212158203125, 0.9018624267578125, 0.9014783935546875, 0.9012479858398438, 0.9013688354492188, 0.90138623046875, 0.9015767211914063, 0.90112109375, 0.901693359375, 0.901707763671875, 0.9020232543945312, 0.901571533203125, 0.9019771118164063, 0.901349365234375, 0.9013718872070312, 0.9014671630859376, 0.9014220581054687, 0.9012838134765625, 0.9017538452148437, 0.9016350708007812, 0.9019105224609375, 0.9021767578125, 0.901813232421875, 0.9018296508789062, 0.9015695190429688, 0.903024658203125, 0.902118408203125, 0.9014558715820312, 0.9017098388671875, 0.901375, 0.901796875, 0.90182861328125, 0.9018388671875, 0.901644287109375, 0.9020845947265625, 0.9015941162109375, 0.9019043579101562, 0.9019207763671875, 0.901602294921875, 0.9014589233398438, 0.9020272827148438, 0.9018408813476563, 0.9020088500976563, 0.9018572998046875, 1.8591446533203124, 0.9011109008789062, 0.9014906005859376, 0.9012500610351563, 0.9013309326171876, 0.9015132446289063, 0.9010267944335938, 0.90169140625, 0.9015234375, 0.9012705078125, 0.90149169921875, 0.90165869140625, 0.90100830078125, 0.9009920043945312, 0.9015848999023437, 0.9015347290039063, 0.9014989013671875, 0.9024532470703125, 0.9021880493164063, 0.9016944580078124, 0.9015951538085938, 0.9013145751953126, 0.90147021484375, 0.9014896850585937, 0.9010872192382813, 0.9012111206054687, 0.9012377319335938, 0.9011046142578125, 0.90134423828125, 0.9021696166992188, 0.9015510864257813, 0.9017293090820313, 0.9014609985351563, 0.9018982543945312, 0.9016985473632813, 0.9015664672851562, 0.9012459716796875, 0.9016145629882812, 0.9012357177734375, 0.9017845458984375, 0.9017640991210938, 0.9023355102539062, 0.9016002807617187, 0.9013688354492188, 0.9013800659179687, 0.9019535522460937, 0.9013340454101563, 0.9017763671875, 0.9015776977539063, 0.90165966796875, 0.9015429077148438, 0.9014620361328125, 0.9018777465820312, 0.9020886840820312, 0.9014589233398438, 0.9016187133789062, 0.901876708984375, 0.903267333984375, 0.9016299438476563, 0.9016832275390625, 0.901592041015625, 0.9018306274414063, 0.9016535034179688, 1.8594969482421875, 0.90172314453125, 0.901623779296875, 0.9014261474609375, 0.9010974731445313, 0.9016514282226562, 0.9012612915039062, 0.9012162475585938, 0.9010811157226563, 0.900874267578125, 0.9016196899414063, 
0.9018163452148438, 0.9015050048828125, 0.9010811157226563, 0.9016350708007812, 0.9010995483398437, 0.9011435546875, 0.9012008666992187, 0.9010196533203125, 0.90096337890625, 0.9009110717773438, 0.9015501098632812, 0.9013790893554687, 0.901444580078125, 0.9009868774414063, 0.9019638061523437, 0.9014251708984375, 0.9013463134765625, 0.9010083618164062, 0.9014886474609375, 0.9014528198242188, 0.9012612915039062, 0.902129638671875, 0.9023948974609375, 0.9013688354492188, 0.9017241821289063, 0.90169140625, 0.9015562133789062, 0.9032264404296875, 0.90172509765625, 0.9013514404296875, 0.9015767211914063, 0.9013986206054687, 0.9023374633789063, 0.9012766723632812, 0.90123876953125, 0.9009448852539063, 0.9015643920898437, 0.9012295532226563, 0.9011148681640625, 0.9011896362304688, 0.9011199951171875, 0.9018316650390625, 0.9017835693359375, 0.9014640502929687, 0.9016084594726562, 0.90189208984375, 0.901802978515625, 0.9020620727539063, 0.9019801635742187, 0.9017937622070312, 0.9018091430664062, 0.90191357421875, 1.858798583984375, 0.9011435546875, 0.9014241333007813, 0.90132275390625, 0.9010053100585937, 0.9013289184570312, 0.9013585815429688, 0.9011660766601562, 0.9014292602539062, 0.901317626953125, 0.901381103515625, 0.901228515625, 0.9015480346679687, 0.9016514282226562, 0.9015521240234375, 0.90144873046875, 0.9019647827148437, 0.9018716430664062, 0.9012531127929687, 0.9012572021484375, 0.901212158203125, 0.9014169311523438, 0.901138427734375, 0.9026416625976562, 0.9016320190429687, 0.9017907104492188, 0.9014876098632812, 0.9016504516601562, 0.901855224609375, 0.9019913940429688, 0.9016053466796875, 0.9015654296875, 0.9016565551757812, 0.9016135864257813, 0.901591064453125, 0.9022617797851562, 0.901928955078125, 0.9018091430664062, 0.901897216796875, 0.9014537963867187, 0.9010780029296875, 0.9012725830078125, 0.9011988525390625, 0.9014630126953125, 0.90176416015625, 0.9015735473632812, 0.9017302856445313, 0.9021542358398438, 0.9014528198242188, 0.9019176635742188, 0.9015859375, 0.9015582275390625, 0.901423095703125, 0.9016873168945313, 0.9015879516601563, 0.9017313232421875, 0.9018091430664062, 0.902240234375, 0.9021122436523438, 0.9021419677734375, 0.9018674926757813, 0.9018511352539063, 0.9022258911132812, 1.8597550048828124, 0.9009326171875, 0.90142822265625, 0.901043212890625, 0.9013135375976562, 0.9019094848632813, 0.9014773559570313, 0.9016790771484375, 0.9016719360351563, 0.9012367553710937, 0.9012531127929687, 0.9010974731445313, 0.901064697265625, 0.90119677734375, 0.9015040283203125, 0.901970947265625, 0.90260888671875, 0.9018121948242187, 0.9016063842773437, 0.9018624267578125, 0.9015449829101563, 0.901992431640625, 0.9017988891601563, 0.901696533203125, 0.9012930297851562, 0.9012428588867187, 0.90197607421875, 0.9014159545898438, 0.90151220703125, 0.9018674926757813, 0.9016678466796875, 0.9016135864257813, 0.9015623779296875, 0.9015643920898437, 0.9014937744140625, 0.902150146484375, 0.9015090942382813, 0.9018839111328125, 0.9017354125976562, 0.9015787353515625, 0.9013851928710938, 0.9017507934570312, 0.9015869140625, 0.9017579345703125, 0.901917724609375, 0.9019903564453124, 0.90163916015625, 0.9023303833007813, 0.901960693359375, 0.9023068237304688, 0.9017538452148437, 0.9017886962890626, 0.901560302734375, 0.9016637573242188, 0.9014384765625, 0.9020631103515625, 0.9018910522460938, 0.9014251708984375, 0.9013934326171875, 0.9016893310546875, 0.9019873657226563, 0.9017251586914062, 0.901432373046875, 1.8595020751953124, 0.9010237426757812, 0.9012930297851562, 0.9010022583007813, 
0.9012469482421875, 0.9015675048828125, 0.9011814575195313, 0.9016350708007812, 0.9019985961914062, 0.9014097900390625, 0.9013739624023438, 0.9014691772460938, 0.901528564453125, 0.9014323120117187, 0.90157568359375, 0.9015634155273438, 0.9018613891601562, 0.9021675415039062, 0.9016709594726563, 0.9019002685546875, 0.9019852905273438, 0.9018562622070313, 0.900979736328125, 0.90132373046875, 0.901507080078125, 0.9013309936523437, 0.9011680908203125, 0.9011712036132813, 0.9011783447265626, 0.9020948486328125, 0.9016350708007812, 0.9018470458984374, 0.9016832275390625, 0.901180419921875, 0.9011251220703125, 0.901518310546875, 0.9013657836914063, 0.9012398071289063, 0.90106982421875, 0.9011405029296875, 0.901254150390625, 0.9014476928710937, 0.9010718994140625, 0.9012920532226563, 0.90113330078125, 0.9011609497070312, 0.901159912109375, 0.9013043212890625, 0.9009213256835937, 0.9009336547851563, 0.90114453125, 0.9015357666015625, 0.9015296020507813, 0.9014456176757812, 0.9014261474609375, 0.9017907104492188, 0.9016565551757812, 0.9016832275390625, 0.90138623046875, 0.9014343872070313, 0.9012725830078125, 0.9016371459960938, 0.902470703125, 1.8604583740234375, 0.9015695190429688, 0.9012644653320312, 0.9009939575195313, 0.9012254638671875, 0.9013217163085937, 0.9012008666992187, 0.9012828369140625, 0.9010513916015624, 0.9015951538085938, 0.9012674560546875, 0.9009285278320313, 0.9009203491210938, 0.9009633178710937, 0.9009141845703125, 0.9008394165039062, 0.9009039306640625, 0.901094482421875, 0.9009048461914062, 0.9008230590820312, 0.9013002319335938, 0.9019085083007813, 0.9010206909179688, 0.9013463134765625, 0.9014251708984375, 0.9011159057617187, 0.9022166748046875, 0.9010892944335938, 0.900806640625, 0.9012715454101563, 0.9008599243164063, 0.9013524780273438, 0.9015029907226563, 0.9012828369140625, 0.9010565185546875, 0.9016555786132813, 0.9012265014648437, 0.90136474609375, 0.9013237915039063, 0.9012940673828125, 0.90121728515625, 0.9013165893554688, 0.901396484375, 0.9020753784179687, 0.9011875610351563, 0.9012091064453125, 0.9015101318359375, 0.9015930786132812, 0.90141796875, 0.9013585815429688, 0.9015480346679687, 0.9018736572265625, 0.9019412231445313, 0.9015439453125, 0.9019586791992188, 0.9018480834960938, 0.9013289184570312, 0.9018142700195313, 0.9015265502929688, 0.901791748046875, 0.9013944091796875, 0.9017569580078125, 0.9013258056640625]",tokens/s,1.0926340535470893,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-30b,facebook/opt-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,5984.595968,19933.954048,0.0,19287.506944,18376.399872,s,10,24.56320629882812,2.456320629882813,0.001691664801696018,2.4561865234375,2.4583423828125,2.4586625244140623,2.4589186376953127,"[2.456571533203125, 2.458982666015625, 2.4545791015625, 2.455032470703125, 2.454942626953125, 2.453741943359375, 2.455801513671875, 2.458271240234375, 2.45821728515625, 
2.45706591796875]",tokens/s,104.22092168489151,kWh,2.8975297907988232e-05,1.5879395596475657e-05,0.00013865674981416998,0.00018351144331863386,tokens/kWh,1395008.3731590684,MB,5988.143104,19933.954048,0.0,19287.506944,18871.04,s,10,1458.421484375,145.84214843750001,0.012211079453744932,145.84282812499998,145.852815625,145.8578921875,145.8619534375,"[145.82440625, 145.837078125, 145.840359375, 145.8516875, 145.845296875, 145.8391875, 145.86296875, 145.8513125, 145.82034375, 145.84884375]",tokens/s,0.43197388872119064,kWh,0.001721854172928466,0.000943728472916573,0.008154870135002046,0.010820452780847084,tokens/kWh,5822.307187691273,,s,629,1478.2121892089838,2.350098869966589,0.29170661243792756,2.3148583984375,2.3159804199218748,2.3165888183593752,4.76929474609375,"[2.31657470703125, 2.316729248046875, 2.31575048828125, 2.31400439453125, 2.31361328125, 2.314355712890625, 2.31430859375, 2.313990234375, 2.314439697265625, 2.31400439453125, 2.314337158203125, 2.31450830078125, 2.3143525390625, 2.313754638671875, 2.3145595703125, 2.314746826171875, 2.313860107421875, 2.314616943359375, 2.31397998046875, 2.314455078125, 2.314322021484375, 2.315072509765625, 2.314198974609375, 2.314560546875, 2.31431689453125, 2.314648681640625, 2.314249267578125, 2.3142431640625, 2.31486767578125, 2.314310546875, 2.31423681640625, 2.314349609375, 2.31522509765625, 2.314227783203125, 2.314047607421875, 2.314390380859375, 2.31465380859375, 2.31459130859375, 2.3147294921875, 2.31495263671875, 2.31514306640625, 2.314708984375, 2.314705810546875, 2.314702880859375, 2.31510009765625, 2.314967041015625, 2.314390625, 2.3144326171875, 2.31436279296875, 2.3152353515625, 2.31499169921875, 2.31524560546875, 2.314324951171875, 2.3151728515625, 2.31494970703125, 2.314418212890625, 2.315260009765625, 2.3155927734375, 2.314987548828125, 2.3148759765625, 2.314437744140625, 2.314975341796875, 4.77530908203125, 2.315552734375, 2.313754638671875, 2.31431787109375, 2.314470458984375, 2.31491064453125, 2.313483154296875, 2.3140751953125, 2.314690673828125, 2.313935791015625, 2.31429541015625, 2.313943115234375, 2.3145830078125, 2.314388427734375, 2.3144130859375, 2.314663818359375, 2.314819580078125, 2.31438134765625, 2.31452978515625, 2.313989013671875, 2.314265625, 2.31451025390625, 2.31416015625, 2.313996337890625, 2.314390625, 2.314997802734375, 2.31469970703125, 2.31465380859375, 2.314850341796875, 2.315021240234375, 2.31556396484375, 2.314933349609375, 2.315074462890625, 2.314526611328125, 2.31516162109375, 2.314416015625, 2.31450634765625, 2.3146474609375, 2.31455322265625, 2.314646484375, 2.315080810546875, 2.314736572265625, 2.315146240234375, 2.316416015625, 2.316883056640625, 2.317212646484375, 2.317498291015625, 2.316708984375, 2.314590087890625, 2.31486669921875, 2.314609619140625, 2.314794921875, 2.3152783203125, 2.314463134765625, 2.31488720703125, 2.3152783203125, 2.3158486328125, 2.315106201171875, 2.31514208984375, 2.315134033203125, 2.314939453125, 2.31448974609375, 2.31490966796875, 4.76900048828125, 2.314322998046875, 2.31459423828125, 2.314704833984375, 2.3145400390625, 2.314397705078125, 2.315212890625, 2.315070556640625, 2.3151708984375, 2.314818603515625, 2.31474072265625, 2.314556396484375, 2.314756103515625, 2.314300537109375, 2.314501220703125, 2.314758056640625, 2.31469580078125, 2.314715087890625, 2.314291259765625, 2.313954345703125, 2.3144990234375, 2.3141162109375, 2.314789794921875, 2.3138662109375, 2.314201171875, 2.31396044921875, 2.314891357421875, 2.313994140625, 2.3142041015625, 
2.31410595703125, 2.31444384765625, 2.314310546875, 2.3143935546875, 2.3148564453125, 2.314733642578125, 2.314330078125, 2.31438134765625, 2.3142685546875, 2.3148154296875, 2.315052978515625, 2.31476318359375, 2.31545751953125, 2.315313232421875, 2.315279296875, 2.315707275390625, 2.314758056640625, 2.3152158203125, 2.31581689453125, 2.315241455078125, 2.3146220703125, 2.315763671875, 2.3165615234375, 2.31669873046875, 2.3161025390625, 2.31678759765625, 2.31653173828125, 2.316707763671875, 2.3152138671875, 2.31585693359375, 2.314685546875, 2.315296875, 2.3153837890625, 2.315890625, 4.7694091796875, 2.314258544921875, 2.3142666015625, 2.31421435546875, 2.314051513671875, 2.314406982421875, 2.314616943359375, 2.314188720703125, 2.31503662109375, 2.31619287109375, 2.31446533203125, 2.313934814453125, 2.315040771484375, 2.314817626953125, 2.315828125, 2.3156357421875, 2.316775390625, 2.314610595703125, 2.31668017578125, 2.31729052734375, 2.31545556640625, 2.315461669921875, 2.315554931640625, 2.31465576171875, 2.314517578125, 2.3154697265625, 2.31606689453125, 2.314260498046875, 2.314863525390625, 2.314272705078125, 2.314649658203125, 2.3157421875, 2.315529296875, 2.3159091796875, 2.314768310546875, 2.3160771484375, 2.315504638671875, 2.314666015625, 2.314771484375, 2.315051025390625, 2.3154248046875, 2.314206298828125, 2.314478515625, 2.31497021484375, 2.315460693359375, 2.314807373046875, 2.31501708984375, 2.315440185546875, 2.315284423828125, 2.31450634765625, 2.314502197265625, 2.314387451171875, 2.314957763671875, 2.3149384765625, 2.314912841796875, 2.314670166015625, 2.3156572265625, 2.316516357421875, 2.3157412109375, 2.315861083984375, 2.315828125, 2.315040771484375, 2.3151728515625, 4.7702314453125, 2.31434033203125, 2.314682373046875, 2.313740234375, 2.314062744140625, 2.31486767578125, 2.314884033203125, 2.314498046875, 2.314965087890625, 2.314472412109375, 2.314133544921875, 2.31417041015625, 2.313879638671875, 2.313923583984375, 2.314146728515625, 2.314799072265625, 2.3140986328125, 2.314390625, 2.315798583984375, 2.31642822265625, 2.315537353515625, 2.316564453125, 2.314660888671875, 2.313965576171875, 2.314789794921875, 2.31402294921875, 2.314662841796875, 2.314347412109375, 2.315102294921875, 2.3144150390625, 2.315137939453125, 2.31604833984375, 2.315811767578125, 2.3149404296875, 2.31493115234375, 2.315124755859375, 2.3152158203125, 2.314859619140625, 2.315197509765625, 2.314984375, 2.315462646484375, 2.31446533203125, 2.3143095703125, 2.31438037109375, 2.31520556640625, 2.31512158203125, 2.314987548828125, 2.314483642578125, 2.31514208984375, 2.314712158203125, 2.31526513671875, 2.314966064453125, 2.314924072265625, 2.314987548828125, 2.31532958984375, 2.31476318359375, 2.3151298828125, 2.315618408203125, 2.316012451171875, 2.317046875, 2.316642333984375, 2.317076416015625, 2.317365234375, 4.7678056640625, 2.314080322265625, 2.31505908203125, 2.315978759765625, 2.315252685546875, 2.31602392578125, 2.3155966796875, 2.31449609375, 2.31440087890625, 2.31406396484375, 2.31448583984375, 2.314968017578125, 2.314263671875, 2.31400146484375, 2.314314697265625, 2.314521484375, 2.3153408203125, 2.31357861328125, 2.3147294921875, 2.31446728515625, 2.315052978515625, 2.314033203125, 2.314588134765625, 2.314469482421875, 2.31505517578125, 2.3144765625, 2.3147109375, 2.31465478515625, 2.315134033203125, 2.31459521484375, 2.31478466796875, 2.314600341796875, 2.314883056640625, 2.315136962890625, 2.315252685546875, 2.31488818359375, 2.315322265625, 2.31524755859375, 2.31511767578125, 
2.3150263671875, 2.3153603515625, 2.315388916015625, 2.31535009765625, 2.31446826171875, 2.31495068359375, 2.31469775390625, 2.315263916015625, 2.314312744140625, 2.3146865234375, 2.314904541015625, 2.31448974609375, 2.314483642578125, 2.314174560546875, 2.314915771484375, 2.314462158203125, 2.3165869140625, 2.317093994140625, 2.31678759765625, 2.315781005859375, 2.315177978515625, 2.315336669921875, 2.31464453125, 2.31521484375, 4.770552734375, 2.314354736328125, 2.31482568359375, 2.313882568359375, 2.31376171875, 2.313788330078125, 2.315693115234375, 2.31585986328125, 2.31478271484375, 2.315926513671875, 2.3154453125, 2.315216796875, 2.314701904296875, 2.315554931640625, 2.31552197265625, 2.315801513671875, 2.316396484375, 2.314292236328125, 2.314648681640625, 2.31490771484375, 2.315061279296875, 2.3164384765625, 2.316370849609375, 2.316866455078125, 2.31659521484375, 2.31632080078125, 2.315591796875, 2.31587646484375, 2.3159716796875, 2.315903076171875, 2.315663330078125, 2.31596240234375, 2.316310546875, 2.314661865234375, 2.315462646484375, 2.31440380859375, 2.31526611328125, 2.315337646484375, 2.315576416015625, 2.31480419921875, 2.314577880859375, 2.316158935546875, 2.316590087890625, 2.31474072265625, 2.314672119140625, 2.315419677734375, 2.315107421875, 2.315031494140625, 2.314947509765625, 2.31512158203125, 2.31507861328125, 2.315275146484375, 2.31488916015625, 2.314337158203125, 2.315274169921875, 2.315357177734375, 2.314990478515625, 2.314895263671875, 2.315843505859375, 2.315281494140625, 2.315041748046875, 2.314649658203125, 2.3151708984375, 4.7742177734375, 2.314451904296875, 2.314017822265625, 2.316307373046875, 2.31636376953125, 2.315851806640625, 2.3162041015625, 2.315658203125, 2.315419677734375, 2.315569091796875, 2.31410693359375, 2.3144345703125, 2.3141181640625, 2.3143701171875, 2.314511474609375, 2.314544189453125, 2.31514208984375, 2.314577880859375, 2.314501220703125, 2.314337158203125, 2.315443115234375, 2.314441650390625, 2.314269775390625, 2.3141171875, 2.31440380859375, 2.315146240234375, 2.31492919921875, 2.314220458984375, 2.31446533203125, 2.3145595703125, 2.314896484375, 2.3150439453125, 2.314417236328125, 2.31532861328125, 2.315260009765625, 2.315807861328125, 2.31515234375, 2.315829345703125, 2.315790283203125, 2.3159296875, 2.31573193359375, 2.315969482421875, 2.316851318359375, 2.3155537109375, 2.314724365234375, 2.3145625, 2.315716552734375, 2.31531005859375, 2.315375732421875, 2.315345947265625, 2.31587744140625, 2.31564599609375, 2.315336669921875, 2.3145, 2.31510107421875, 2.31535400390625, 2.314984375, 2.31453076171875, 2.3153857421875, 2.314976318359375, 2.315102294921875, 2.31464453125, 2.31491064453125, 4.77313525390625, 2.31535205078125, 2.314526611328125, 2.3150869140625, 2.315890625, 2.314620849609375, 2.3140322265625, 2.313732177734375, 2.31478466796875, 2.31486669921875, 2.3156357421875, 2.31585986328125, 2.314743896484375, 2.314322021484375, 2.3139072265625, 2.31398291015625, 2.314142822265625, 2.31423388671875, 2.31518408203125, 2.31429833984375, 2.314175537109375, 2.3156572265625, 2.3145810546875, 2.313807861328125, 2.314330078125, 2.31499169921875, 2.314958740234375, 2.3144365234375, 2.3145185546875, 2.31440185546875, 2.315052978515625, 2.314460205078125, 2.3145419921875, 2.3140576171875, 2.314851318359375, 2.314546142578125, 2.314408935546875, 2.31389892578125, 2.31480419921875, 2.31436279296875, 2.3144580078125, 2.314177490234375, 2.314638427734375, 2.3145419921875, 2.31480322265625, 2.314745849609375, 2.314291259765625, 
2.31537255859375, 2.31572265625, 2.31389599609375, 2.313974853515625, 2.314586181640625, 2.314895263671875, 2.314090576171875, 2.314375244140625, 2.314859619140625, 2.3148583984375, 2.3144326171875, 2.314412109375, 2.314270751953125, 2.31518115234375, 2.314324951171875, 2.314473388671875, 4.7714609375, 2.31414794921875, 2.313611328125, 2.314270751953125, 2.314892333984375, 2.31461572265625, 2.315198486328125, 2.3143720703125, 2.314142822265625, 2.31568701171875, 2.315873291015625, 2.31440478515625, 2.314210205078125, 2.31602587890625, 2.316198974609375, 2.31553857421875, 2.315966552734375, 2.31545947265625, 2.31545849609375, 2.31590185546875, 2.31619287109375, 2.31564599609375, 2.316030029296875, 2.315187255859375, 2.314642333984375, 2.313966552734375, 2.3152138671875, 2.31450830078125, 2.315548583984375, 2.314388427734375, 2.315707275390625, 2.3161650390625, 2.31663623046875, 2.316718994140625, 2.315875244140625, 2.315216796875, 2.31624609375, 2.315987060546875, 2.315421630859375, 2.315747314453125, 2.314558349609375, 2.313786376953125, 2.313956298828125, 2.3144541015625, 2.3148984375, 2.314367919921875, 2.31458203125, 2.315187255859375, 2.31473046875, 2.314349609375, 2.314651611328125, 2.31442626953125, 2.315454345703125, 2.31490771484375, 2.314850341796875, 2.315431884765625, 2.31543798828125, 2.314609619140625, 2.31511767578125, 2.314015625, 2.31457177734375, 2.314642333984375, 2.31451953125]",tokens/s,0.4255140125292758,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-564M,facebook/xglm-564M,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1510.83008,1619.525632,0.0,973.078528,855.737856,s,10,0.7501975097656249,0.07501975097656251,0.003494001966293861,0.0744185905456543,0.07687975234985352,0.0807021327972412,0.08376003715515137,"[0.08452451324462891, 0.0724151382446289, 0.07226691436767578, 0.07602333068847657, 0.07370025634765626, 0.072499267578125, 0.07526127624511719, 0.07513692474365234, 0.07233955383300782, 0.07603033447265625]",tokens/s,3412.4346811012333,kWh,8.781473283414488e-07,4.811843592162606e-07,2.329853715734e-06,3.6891854032917087e-06,tokens/kWh,69392012.60299407,MB,1510.83008,1619.525632,0.0,973.078528,915.411456,s,10,44.923669921875,4.492366992187501,0.05186865648995985,4.49665234375,4.532765380859375,4.566419799804687,4.593343334960937,"[4.4801484375, 4.46079150390625, 4.396806640625, 4.52528662109375, 4.43945751953125, 4.50497265625, 4.49965625, 4.4936484375, 4.52282763671875, 4.60007421875]",tokens/s,14.023787484317472,kWh,5.2254403288717625e-05,2.8638463246623156e-05,0.00013152905892685864,0.00021242192546219938,tokens/kWh,296579.5544076776,,s,629,45.51549240112303,0.07236167313374095,0.008872551278845286,0.07199948883056641,0.07308881988525391,0.07345274963378906,0.1417435186767578,"[0.07422156524658204, 0.07438028717041016, 0.07465984344482422, 0.07427993774414063, 0.070793212890625, 0.07357952117919922, 0.07368294525146485, 0.07249817657470703, 0.07192473602294921, 0.0720373764038086, 0.07181107330322266, 0.07195545959472656, 
0.07189401245117187, 0.07177318572998047, 0.07202201843261719, 0.07179264068603515, 0.07144448089599609, 0.07195033264160157, 0.07206502532958985, 0.0718704605102539, 0.07257292938232422, 0.07203839874267579, 0.07174246215820312, 0.07193804931640625, 0.0720373764038086, 0.07203123474121094, 0.07179058837890626, 0.07161753845214844, 0.07207730865478515, 0.06936883544921875, 0.06907904052734375, 0.06904524993896484, 0.06900326538085938, 0.0691599349975586, 0.0692305908203125, 0.0704000015258789, 0.07194111633300782, 0.07188582611083984, 0.07171788787841797, 0.07179673767089843, 0.0721244125366211, 0.07180902099609375, 0.07232717132568359, 0.06903193664550782, 0.06900838470458984, 0.0690483169555664, 0.07060889434814453, 0.07242137908935548, 0.06946304321289062, 0.06903091430664063, 0.06926335906982421, 0.06909337615966797, 0.06909645080566407, 0.06918144226074219, 0.06912204742431641, 0.0692152328491211, 0.06930022430419921, 0.06917120361328125, 0.06931763458251954, 0.06909951782226563, 0.06903091430664063, 0.06910975646972656, 0.14174925231933594, 0.07115980529785157, 0.07259852600097656, 0.07286988830566406, 0.07273677062988282, 0.07269068908691406, 0.06918656158447266, 0.07026585388183594, 0.07330303955078125, 0.06960230255126953, 0.06916505432128907, 0.0694824981689453, 0.07036313629150391, 0.0726292495727539, 0.07242649841308593, 0.07292723083496094, 0.07256371307373047, 0.0725749740600586, 0.07288422393798828, 0.0727930908203125, 0.06923571014404296, 0.07285657501220703, 0.07292825317382813, 0.07260671997070313, 0.06921318054199219, 0.06922649383544922, 0.06944051361083985, 0.06901862335205078, 0.06910873413085937, 0.0694814682006836, 0.06917938995361328, 0.0691435546875, 0.06913843536376953, 0.06960230255126953, 0.07092428588867188, 0.07321907043457031, 0.07265586853027343, 0.07284429168701172, 0.07265996551513672, 0.07249919891357422, 0.0727388153076172, 0.06851993560791016, 0.06896537780761719, 0.06924390411376953, 0.06928793334960938, 0.06909951782226563, 0.06925926208496094, 0.06904115295410156, 0.06911590576171875, 0.07245005035400391, 0.07256063842773437, 0.07257087707519531, 0.0727162857055664, 0.0725555191040039, 0.07245414733886718, 0.06913638305664062, 0.06932685089111328, 0.06927565002441406, 0.06910566711425781, 0.06935040283203125, 0.06927974700927735, 0.06926643371582031, 0.06896947479248047, 0.1415485382080078, 0.06951423645019532, 0.06943334197998047, 0.06925619506835938, 0.06917324829101562, 0.06899097442626953, 0.06922752380371094, 0.06912000274658203, 0.06899199676513672, 0.06969446563720703, 0.07301529693603516, 0.06934323120117188, 0.06960537719726563, 0.07306034851074218, 0.07241011047363281, 0.07290675354003906, 0.07274700927734375, 0.0726476821899414, 0.07265996551513672, 0.0726824951171875, 0.07269068908691406, 0.06931763458251954, 0.06954188537597657, 0.06927257537841797, 0.06938623809814454, 0.06953062438964844, 0.06952960205078125, 0.06926233673095702, 0.06965862274169922, 0.06954803466796874, 0.06898995208740234, 0.06898892974853515, 0.0695767059326172, 0.06904729461669921, 0.06909030151367188, 0.0691230697631836, 0.06926131439208984, 0.06881587219238282, 0.06911795043945312, 0.0690145263671875, 0.069106689453125, 0.06901760101318359, 0.06876467132568359, 0.06921625518798828, 0.06918246459960938, 0.06903091430664063, 0.0689797134399414, 0.06893158721923828, 0.0690544662475586, 0.06931865692138672, 0.06913433837890624, 0.0691619873046875, 0.06918758392333985, 0.06922956848144532, 0.06901248168945312, 0.06939955139160156, 0.0690708465576172, 
0.06908415985107422, 0.06909747314453125, 0.06877798461914063, 0.0688721923828125, 0.06958284759521484, 0.07407615661621093, 0.14876364135742187, 0.0725524444580078, 0.06927769470214844, 0.07208243560791015, 0.07297433471679687, 0.0731176986694336, 0.07251251220703125, 0.0712273941040039, 0.07263846588134766, 0.07023104095458985, 0.07284735870361328, 0.07289651489257812, 0.07278694152832031, 0.07275827026367188, 0.0733675537109375, 0.07290982055664062, 0.07269888305664063, 0.06938419342041016, 0.07068057250976563, 0.07282994842529297, 0.07284838104248047, 0.07352217864990235, 0.07396147155761719, 0.07278694152832031, 0.0727224349975586, 0.07267123413085938, 0.07291801452636719, 0.07321497344970704, 0.07286579132080079, 0.0730408935546875, 0.06961766052246093, 0.07272755432128906, 0.07313715362548828, 0.07269990539550782, 0.07287091064453124, 0.07342694091796875, 0.07279821014404297, 0.07299993896484375, 0.07290777587890625, 0.07300505828857422, 0.07294976043701172, 0.07384166717529297, 0.07301222229003906, 0.07304806518554688, 0.07300096130371093, 0.06921318054199219, 0.06943846130371094, 0.07083724975585938, 0.07355596923828125, 0.07319039916992187, 0.06939443206787109, 0.06938111877441407, 0.06927155303955078, 0.0692520980834961, 0.06911283111572265, 0.06944153594970703, 0.06942515563964843, 0.06953266906738281, 0.06910975646972656, 0.06917017364501953, 0.06945382690429687, 0.0691568603515625, 0.0722903060913086, 0.1428500518798828, 0.07174861145019532, 0.07272755432128906, 0.07288422393798828, 0.07220531463623046, 0.07277977752685547, 0.07296819305419922, 0.07285759735107422, 0.0730439682006836, 0.06917120361328125, 0.06948044586181641, 0.06915174102783203, 0.06912921905517579, 0.06931148529052734, 0.06940672302246094, 0.06928179168701172, 0.07211110687255859, 0.07298047637939453, 0.07278079986572265, 0.07271218872070312, 0.07268966674804687, 0.06976306915283204, 0.0731504669189453, 0.07382220458984375, 0.07313919830322266, 0.07456563568115235, 0.0726456298828125, 0.07287091064453124, 0.07290777587890625, 0.072595458984375, 0.06927974700927735, 0.06974668884277344, 0.06926131439208984, 0.06974668884277344, 0.0696094741821289, 0.0693565444946289, 0.06932173156738282, 0.06926541137695312, 0.06931763458251954, 0.06929203033447266, 0.06896025848388672, 0.0688875503540039, 0.0692838363647461, 0.06925107574462891, 0.06969344329833985, 0.06929510498046874, 0.06936064147949218, 0.06916607666015626, 0.06940364837646484, 0.06927667236328125, 0.0688721923828125, 0.06915071868896484, 0.06949273681640625, 0.06910361480712891, 0.06893260955810547, 0.06888857269287109, 0.06916710662841796, 0.06919065856933594, 0.06913433837890624, 0.06897663879394532, 0.06965760040283203, 0.06907596588134765, 0.06904524993896484, 0.14172877502441406, 0.06887628936767579, 0.06923980712890625, 0.06865408325195313, 0.06912102508544922, 0.06938317108154297, 0.06938521575927735, 0.06910771179199218, 0.06913843536376953, 0.06909951782226563, 0.07147007751464844, 0.07286374664306641, 0.07196057891845703, 0.07204454040527344, 0.07199948883056641, 0.07233126068115234, 0.07187763214111328, 0.07217971038818359, 0.07222169494628906, 0.07219097900390625, 0.07204966735839843, 0.07197491455078125, 0.07207833862304687, 0.07198822021484375, 0.0723394546508789, 0.07189299011230468, 0.0720547866821289, 0.07195750427246093, 0.0719288330078125, 0.07224626922607422, 0.0721981430053711, 0.0721295394897461, 0.07224729919433594, 0.07194624328613282, 0.07236812591552734, 0.07330508422851563, 0.07221247863769531, 0.07189094543457031, 
0.07187763214111328, 0.0722503662109375, 0.07257292938232422, 0.07226982116699218, 0.07204761505126953, 0.07212134552001953, 0.07196876525878906, 0.07214694213867187, 0.07219404602050782, 0.07190940856933593, 0.07228310394287109, 0.0723609619140625, 0.07222476959228516, 0.07254732513427735, 0.07211724853515625, 0.07198617553710937, 0.07232102203369141, 0.0694302749633789, 0.06944563293457032, 0.06930226898193359, 0.06912000274658203, 0.07197286224365235, 0.07240499114990234, 0.07262515258789062, 0.07231488037109375, 0.1474877471923828, 0.07206195068359375, 0.07203635406494141, 0.0718704605102539, 0.07204147338867188, 0.07205785369873047, 0.07204249572753907, 0.07156735992431641, 0.0718551025390625, 0.07209369659423828, 0.07219404602050782, 0.07199334716796875, 0.07205683135986328, 0.0719974365234375, 0.07223910522460937, 0.07229440307617188, 0.0720373764038086, 0.07192985534667969, 0.07207218933105469, 0.07226060485839844, 0.07216844940185548, 0.07213158416748047, 0.07198822021484375, 0.07197695922851563, 0.07198515319824218, 0.07197901153564454, 0.07212748718261719, 0.07194111633300782, 0.07195136260986328, 0.07207936096191406, 0.07236505889892578, 0.07226982116699218, 0.06943231964111328, 0.06955929565429687, 0.069607421875, 0.06953164672851563, 0.07322624206542969, 0.07243981170654297, 0.07254937744140624, 0.07196979522705078, 0.07131033325195313, 0.07205785369873047, 0.07247872161865235, 0.07209471893310547, 0.07210291290283204, 0.06919782257080079, 0.06972108459472656, 0.06934835052490235, 0.06924082946777343, 0.06914765167236328, 0.06955519866943359, 0.06937907409667969, 0.06931148529052734, 0.06938829040527343, 0.06970674896240234, 0.06969548797607422, 0.06920089721679687, 0.07203942108154297, 0.07206809234619141, 0.07200972747802735, 0.07209779357910157, 0.07210291290283204, 0.07222681427001953, 0.14453248596191406, 0.06921011352539062, 0.0692326431274414, 0.06925312042236328, 0.06934220886230469, 0.07179571533203125, 0.07198617553710937, 0.07202098846435546, 0.07208243560791015, 0.07199436950683594, 0.07232921600341796, 0.07199027252197265, 0.07135231781005859, 0.07213772583007813, 0.07198207855224609, 0.07201996612548828, 0.07227597045898437, 0.0715315170288086, 0.07204863739013671, 0.0719319076538086, 0.07195442962646484, 0.0720148468017578, 0.07350169372558593, 0.07210291290283204, 0.07235584259033204, 0.07218790435791016, 0.07257804870605469, 0.07216844940185548, 0.07202713775634766, 0.07208550262451172, 0.07201689910888671, 0.0721786880493164, 0.07204557037353515, 0.07207218933105469, 0.0721070098876953, 0.0719482879638672, 0.07234662628173828, 0.07192985534667969, 0.07208345794677734, 0.07192371368408203, 0.07228108978271484, 0.07198925018310547, 0.07188787078857421, 0.07216639709472657, 0.07209062194824219, 0.07224217224121093, 0.07236914825439453, 0.07234559631347656, 0.07200768280029297, 0.07227903747558594, 0.07189913940429687, 0.06916403198242188, 0.06938009643554688, 0.0693903350830078, 0.06937190246582031, 0.06929714965820312, 0.06908108520507812, 0.06912102508544922, 0.06940876770019531, 0.06922752380371094, 0.06929100799560547, 0.06920191955566406, 0.06866534423828125, 0.14497279357910156, 0.07190630340576172, 0.07215411376953125, 0.07193702697753906, 0.07205785369873047, 0.07186431884765625, 0.07196672058105469, 0.07226060485839844, 0.07261901092529296, 0.07256473541259766, 0.0723609619140625, 0.07218585968017578, 0.07166259002685547, 0.07175373077392579, 0.0723773422241211, 0.0719626235961914, 0.07179878234863281, 0.072015869140625, 0.0722001953125, 
0.07199641418457031, 0.07210495758056641, 0.07236300659179687, 0.07228006744384766, 0.07196672058105469, 0.07183257293701172, 0.07211519622802734, 0.07188480377197265, 0.07188070678710938, 0.07184076690673828, 0.07201689910888671, 0.07219404602050782, 0.07205068969726562, 0.07195442962646484, 0.07184076690673828, 0.07215513610839844, 0.07230873870849609, 0.07222067260742188, 0.07214284515380859, 0.07292928314208984, 0.07242649841308593, 0.07191654205322266, 0.07287398529052734, 0.07126732635498047, 0.07227391815185547, 0.07001599884033204, 0.07193292999267578, 0.07222681427001953, 0.07207218933105469, 0.07356108856201173, 0.07293440246582031, 0.07190835571289063, 0.07186943817138672, 0.06945689392089843, 0.06953164672851563, 0.06899814605712891, 0.06878720092773437, 0.06898278045654296, 0.06849638366699219, 0.06950809478759766, 0.07029452514648438, 0.07316889953613281, 0.07317708587646485, 0.07281254577636719, 0.14895103454589845, 0.07272550201416016, 0.07273779296875, 0.07310028839111328, 0.07281459045410156, 0.07278899383544922, 0.0730245132446289, 0.07308595275878907, 0.07223705291748046, 0.072880126953125, 0.07315660858154296, 0.07311974334716796, 0.07316172790527344, 0.07296717071533203, 0.07294464111328125, 0.07245823669433593, 0.07288217926025391, 0.07276441955566407, 0.07286784362792968, 0.07301734161376953, 0.07289548492431641, 0.07284735870361328, 0.07271321868896484, 0.07318630218505859, 0.07307878112792969, 0.07253504180908203, 0.07293644714355468, 0.07297843170166016, 0.07267430114746094, 0.07276748657226563, 0.07329894256591797, 0.07337881469726562, 0.0731668472290039, 0.07399219512939453, 0.07357746887207031, 0.07269580841064453, 0.0729169921875, 0.07252992248535156, 0.07297126770019531, 0.07306240081787109, 0.07285964965820313, 0.07280947113037109, 0.07383859252929688, 0.07331942749023437, 0.07286271667480469, 0.07300812530517578, 0.07352012634277344, 0.07395225524902344, 0.073385986328125, 0.07318630218505859, 0.07355289459228516, 0.07336959838867188, 0.07346995544433593, 0.0732938232421875, 0.07325389099121093, 0.07335321807861328, 0.07319347381591797, 0.0742266845703125, 0.0733337631225586, 0.0729722900390625, 0.0729200668334961, 0.07297740936279297, 0.0695920639038086]",tokens/s,13.819470400466983,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,TencentARC/Mistral_Pro_8B_v0.1,TencentARC/Mistral_Pro_8B_v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2593.296384,7298.613248,0.0,6652.166144,6323.352576,s,10,7.731728576660157,0.7731728576660156,0.0036666872570226623,0.7712686767578125,0.7772910278320313,0.7798124450683593,0.7818295788574219,"[0.7823338623046875, 0.776730712890625, 0.7708938598632813, 0.7714732666015625, 0.7710640869140625, 0.770478515625, 0.770045166015625, 0.773399658203125, 0.7746173706054688, 
0.7706920776367188]",tokens/s,331.10319052428935,kWh,9.090474362556752e-06,4.981150633102515e-06,4.3684586229685554e-05,5.775621122534482e-05,tokens/kWh,4432423.709394238,MB,2593.296384,7298.613248,0.0,6652.166144,6382.565888,s,10,458.08653125,45.808653125,0.015714942284677954,45.80371484375,45.83385546875,45.83744140625,45.84031015625,"[45.8103125, 45.8045234375, 45.84102734375, 45.79598828125, 45.78718359375, 45.80095703125, 45.79937890625, 45.80290625, 45.8111953125, 45.83305859375]",tokens/s,1.3752860148079282,kWh,0.0005408116745630392,0.00029641227384910964,0.0025237221621385317,0.0033609461105506802,tokens/kWh,18744.721851454397,,s,629,464.3046874389652,0.7381632550698964,0.09178720852369766,0.7269785766601562,0.7282177978515625,0.72855,1.49778814453125,"[0.728890380859375, 0.7286569213867188, 0.728468505859375, 0.727841796875, 0.7261204223632812, 0.7262678833007813, 0.7260436401367187, 0.7263180541992188, 0.7261552734375, 0.7261777954101563, 0.727593994140625, 0.7266488037109375, 0.726371337890625, 0.7267962646484375, 0.7268167724609375, 0.72713623046875, 0.7261091918945313, 0.7263775024414062, 0.726086669921875, 0.7265126953125, 0.7268792114257813, 0.7265433349609375, 0.7264358520507812, 0.7269488525390625, 0.7277793579101562, 0.7264532470703124, 0.7258746948242187, 0.7262843017578124, 0.726619140625, 0.726530029296875, 0.726930419921875, 0.7266365356445312, 0.7263467407226563, 0.7267451171875, 0.7262003173828125, 0.7265435180664063, 0.7259226684570312, 0.7268444213867188, 0.727319580078125, 0.72618798828125, 0.725907470703125, 0.7261306762695312, 0.7267153930664062, 0.7260835571289063, 0.7304304809570312, 0.7278233642578125, 0.7278356323242188, 0.7286978759765625, 0.7283251342773438, 0.7282206420898437, 0.7269744873046875, 0.7285032958984375, 0.728458251953125, 0.7281141967773438, 0.7277547607421875, 0.727568359375, 0.7287777099609375, 0.727846923828125, 0.7280547485351563, 0.72765234375, 0.7284254760742187, 0.7282565307617187, 1.504901123046875, 0.7284664306640625, 0.7290664672851562, 0.7276646118164063, 0.7266221923828124, 0.7263283081054688, 0.7270154418945313, 0.7276861572265625, 0.7269110717773437, 0.7259544677734375, 0.7266682739257813, 0.7265515747070312, 0.7271710815429687, 0.7266058349609374, 0.7279667358398437, 0.7284111328125, 0.7286005859375, 0.726703125, 0.7264102172851562, 0.7261973876953125, 0.7271340942382812, 0.72724072265625, 0.7264429931640625, 0.726245361328125, 0.7261767578125, 0.7261071166992188, 0.72650244140625, 0.726097900390625, 0.7263776245117187, 0.726574951171875, 0.728237060546875, 0.7274116821289063, 0.7262125854492187, 0.7267153930664062, 0.7282913208007813, 0.726614013671875, 0.7263006591796874, 0.7260282592773437, 0.726560791015625, 0.7266262817382813, 0.7263662109375, 0.7263231811523437, 0.7260938110351562, 0.727815185546875, 0.7281008911132812, 0.727647216796875, 0.7297269897460937, 0.726920166015625, 0.72753564453125, 0.7265023803710937, 0.726255615234375, 0.7262545776367187, 0.7261071166992188, 0.72612353515625, 0.726687744140625, 0.726403076171875, 0.7265781860351562, 0.7276553955078126, 0.728158203125, 0.7277864990234375, 0.7276748657226563, 0.7281663818359375, 0.7285872802734376, 1.497443359375, 0.726253662109375, 0.7272754516601563, 0.7280455932617188, 0.7274752197265625, 0.7277711181640625, 0.728121337890625, 0.7287439575195312, 0.7278479614257812, 0.7272069091796876, 0.728195068359375, 0.7279093627929687, 0.7278653564453125, 0.7270317993164063, 0.7276226806640625, 0.7280025634765624, 0.7273707275390625, 0.7268731079101562, 
0.7277240600585938, 0.7278991088867187, 0.728922119140625, 0.7292661743164063, 0.7271915283203125, 0.72674609375, 0.72665087890625, 0.7293204345703125, 0.7278919677734375, 0.7269775390625, 0.7271495971679688, 0.7265064697265625, 0.7276011352539062, 0.7278345947265625, 0.7281715087890624, 0.7285504150390625, 0.72888525390625, 0.7278458862304688, 0.7282175903320313, 0.7279083251953125, 0.7274700927734375, 0.72768408203125, 0.7280230102539063, 0.727125, 0.7273963623046875, 0.7267492065429687, 0.7279011840820313, 0.7279144897460937, 0.7274905395507812, 0.7290408935546875, 0.7272591552734375, 0.7271188354492187, 0.7262003173828125, 0.7260712890625, 0.7269284057617188, 0.7274291381835938, 0.7278981323242187, 0.7281622924804687, 0.727773193359375, 0.72789501953125, 0.7277240600585938, 0.727568359375, 0.727462890625, 0.72740966796875, 0.7279739379882812, 1.49783544921875, 0.7277803344726562, 0.7284193115234375, 0.7274598388671875, 0.727204833984375, 0.7272642822265625, 0.7271331787109375, 0.7269990234375, 0.72734619140625, 0.726697998046875, 0.7275581665039063, 0.7281787109375, 0.7283179321289063, 0.7270953369140625, 0.7263682250976562, 0.7266856689453125, 0.7272109985351562, 0.7262371826171875, 0.7263467407226563, 0.726561767578125, 0.7266099243164063, 0.7263775024414062, 0.7269427490234375, 0.7270051879882813, 0.7267901611328125, 0.7268003540039063, 0.7271588134765625, 0.7267839965820313, 0.7270287475585937, 0.7264204711914063, 0.7269273681640624, 0.7266437377929688, 0.7264727172851563, 0.7263098754882813, 0.7269898071289063, 0.72724072265625, 0.7265023803710937, 0.726582275390625, 0.727125, 0.7272601318359375, 0.7271116943359375, 0.7265310668945313, 0.7264296875, 0.7270942993164062, 0.72810498046875, 0.7268444213867188, 0.7269007568359375, 0.72627197265625, 0.7266754760742188, 0.7263273315429688, 0.726993896484375, 0.72631298828125, 0.727208984375, 0.7275560913085938, 0.7275110473632812, 0.7266375732421875, 0.7264818725585938, 0.7270154418945313, 0.726582275390625, 0.7271209106445312, 0.7262269287109375, 0.7262637939453125, 0.7265955810546875, 1.49766650390625, 0.7265485229492188, 0.7276533203125, 0.7271311645507812, 0.726666259765625, 0.7262494506835937, 0.7270901489257813, 0.7279677734375, 0.7269109497070313, 0.7260007934570313, 0.726369140625, 0.7265126342773438, 0.7262740478515625, 0.7264921875, 0.7263375244140625, 0.7265730590820313, 0.727419921875, 0.7267584228515624, 0.726582275390625, 0.7271137084960938, 0.7265505981445313, 0.726699951171875, 0.72631298828125, 0.7264921875, 0.7262166748046875, 0.7264942016601562, 0.727056396484375, 0.7262740478515625, 0.726329345703125, 0.72700927734375, 0.7266652221679688, 0.7271157836914063, 0.7266324462890625, 0.7262699584960938, 0.7262648315429687, 0.7263733520507812, 0.7264257202148438, 0.7263466796875, 0.7261910400390625, 0.726476806640625, 0.7270922241210938, 0.7264163818359375, 0.726287353515625, 0.726835205078125, 0.7277772827148438, 0.7272254638671874, 0.7271760864257812, 0.7273492431640625, 0.7281674194335938, 0.7272499389648438, 0.7273533325195313, 0.7268812866210937, 0.7270850830078125, 0.7263784790039063, 0.7263406372070312, 0.7266806030273437, 0.7265914916992188, 0.7289251708984374, 0.7269837036132812, 0.7268054809570312, 0.7262802124023438, 0.7263119506835938, 0.727593994140625, 1.498050537109375, 0.7272069091796876, 0.7269785766601562, 0.7263672485351562, 0.7267686157226563, 0.7265392456054688, 0.7266918334960938, 0.7279226684570312, 0.7270952758789062, 0.7273717651367188, 0.7269324951171875, 0.7273584594726562, 0.7267799072265625, 
0.7266355590820313, 0.7265770874023437, 0.7268864135742188, 0.7267072143554687, 0.7268515625, 0.72646875, 0.7266506958007812, 0.7269765014648437, 0.7271884765625, 0.7277824096679687, 0.7268259887695312, 0.7264942016601562, 0.7269846801757812, 0.7266631469726562, 0.7266221923828124, 0.7265433959960937, 0.7265709228515626, 0.7269048461914063, 0.726640625, 0.7268331298828125, 0.7268157348632812, 0.7275376586914063, 0.7284869384765625, 0.72732568359375, 0.72707275390625, 0.7283681030273438, 0.7273052368164062, 0.727035888671875, 0.726771728515625, 0.7267860717773438, 0.7267205200195312, 0.7269447631835938, 0.7271106567382812, 0.7269212036132813, 0.7263958740234375, 0.7277291259765625, 0.7267225341796875, 0.7280814208984375, 0.726513671875, 0.72703076171875, 0.7273072509765625, 0.7264839477539062, 0.72660888671875, 0.7266047973632812, 0.7268126831054688, 0.7263416137695312, 0.7265904541015625, 0.727103515625, 0.72654541015625, 0.7273564453125, 1.4995804443359375, 0.7264389038085938, 0.72692529296875, 0.726814697265625, 0.7266785278320312, 0.7263928833007812, 0.7267000122070313, 0.726929443359375, 0.7269508666992187, 0.7267072143554687, 0.7272652587890625, 0.727456787109375, 0.7268905029296875, 0.7272182006835938, 0.7271659545898438, 0.7272396850585937, 0.727488525390625, 0.727024658203125, 0.7267123413085937, 0.7267573852539062, 0.72673486328125, 0.7267174682617188, 0.7264542846679688, 0.7265056762695312, 0.7268145141601563, 0.7267010498046875, 0.7269017333984376, 0.7276697387695312, 0.7267901611328125, 0.7262535400390625, 0.7270850830078125, 0.7274118041992188, 0.7274280395507813, 0.7267235717773437, 0.727041015625, 0.7266365356445312, 0.7266611328125, 0.7265853271484375, 0.7269519653320312, 0.7284623413085938, 0.7270697021484375, 0.7270390014648438, 0.7264204711914063, 0.7266826171875, 0.7274915771484375, 0.726845458984375, 0.72681982421875, 0.72749365234375, 0.72808447265625, 0.727667724609375, 0.726656005859375, 0.7265833129882813, 0.726640625, 0.7277496337890625, 0.7269703979492188, 0.7271680297851563, 0.7267593994140625, 0.7272683715820313, 0.727320556640625, 0.7269324951171875, 0.7269560546875, 0.7263651733398437, 0.7267870483398438, 1.500390380859375, 0.7265679321289062, 0.7275233154296875, 0.7270062255859375, 0.7268106079101563, 0.7261430053710938, 0.7260712890625, 0.7270625, 0.7281285400390625, 0.728479736328125, 0.7286661376953125, 0.7285464477539062, 0.7271463623046875, 0.7275120849609376, 0.7265228881835938, 0.7265628051757812, 0.7272130737304687, 0.7273912353515625, 0.7264358520507812, 0.7261306762695312, 0.7267123413085937, 0.7264491577148438, 0.72669287109375, 0.726719482421875, 0.7263631591796875, 0.7265198364257812, 0.7262894287109375, 0.7263252563476562, 0.7271505737304688, 0.7277117309570312, 0.7283435668945313, 0.7273421020507812, 0.7282186279296875, 0.7270390014648438, 0.7294136352539062, 0.7282667236328125, 0.7285493774414062, 0.7274598388671875, 0.7267921752929688, 0.7266416625976563, 0.72656591796875, 0.72635595703125, 0.7266129760742187, 0.7267891235351562, 0.7269498901367187, 0.7263416137695312, 0.72686181640625, 0.7261245727539063, 0.7262761840820312, 0.7264869995117188, 0.7264901123046875, 0.7260047607421874, 0.726487060546875, 0.7265208129882812, 0.7264603881835937, 0.7263908081054687, 0.7275448608398437, 0.727146484375, 0.72680859375, 0.7264603881835937, 0.7277178955078125, 0.72709326171875, 0.728501220703125, 1.5012095947265625, 0.72654541015625, 0.7268945922851563, 0.7272919311523437, 0.7272028198242187, 0.7270809326171875, 0.7276093139648437, 
0.7282923583984375, 0.7271884765625, 0.7268485717773437, 0.726763427734375, 0.7262822265625, 0.7274660034179687, 0.7266007080078125, 0.72632421875, 0.7261010131835938, 0.726930419921875, 0.7262833251953125, 0.727236572265625, 0.7263109130859375, 0.727277587890625, 0.729017333984375, 0.7280557861328125, 0.7287122192382812, 0.726213623046875, 0.72635595703125, 0.7278028564453125, 0.7271577758789063, 0.7265413208007813, 0.7267686157226563, 0.7265740966796875, 0.7276277465820312, 0.727436279296875, 0.7270174560546875, 0.7273318481445312, 0.72755712890625, 0.72652392578125, 0.7278878784179688, 0.7272489013671875, 0.7272007446289063, 0.727320556640625, 0.7268229370117187, 0.7260794677734375, 0.7274660034179687, 0.726408203125, 0.726381591796875, 0.7260671997070313, 0.7263037719726563, 0.7268433837890625, 0.726957275390625, 0.7271473388671875, 0.7270225830078125, 0.7275509643554687, 0.7284777221679688, 0.7270584106445312, 0.7272191772460938, 0.727357421875, 0.7268218994140625, 0.7266734008789062, 0.726677490234375, 0.7279493408203125, 0.7279093627929687, 0.7280087280273437, 1.50259716796875, 0.7271526489257812, 0.727667724609375, 0.7279851684570312, 0.7274424438476562, 0.7262618408203125, 0.7266088256835938, 0.7269324951171875, 0.7273543701171875, 0.7273164672851562, 0.7271823120117188, 0.7268035278320313, 0.7278939208984375, 0.7264112548828126, 0.7268864135742188, 0.726513671875, 0.7265802001953126, 0.72677685546875, 0.7268116455078125, 0.7273564453125, 0.72827392578125, 0.72766259765625, 0.7272315063476562, 0.7262218017578125, 0.726414306640625, 0.7278561401367187, 0.728342529296875, 0.7276656494140625, 0.7273707275390625, 0.7264389038085938, 0.7276830444335938, 0.7273564453125, 0.7274208984375, 0.7277598876953125, 0.72755712890625, 0.7271044921875, 0.7269417114257812, 0.7284859008789063, 0.728322021484375, 0.727647216796875, 0.7293214721679687, 0.7274178466796875, 0.7273226318359375, 0.7271823120117188, 0.72783154296875, 0.7280087280273437, 0.7273554077148437, 0.7271423950195313, 0.7276287841796875, 0.7275499267578125, 0.7273082885742187, 0.7275765991210937, 0.7275704345703125, 0.7288955078125, 0.7285176391601562, 0.7280199584960938, 0.727841796875, 0.72796875, 0.7280548095703125, 0.7271168212890625, 0.7272642822265625, 0.7274669799804687, 0.7284869384765625]",tokens/s,1.3547138700439787,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-base-alpha-7b,stabilityai/stablelm-base-alpha-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.1,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1435.398144,8196.194304,0.0,7549.7472,6940.714496,s,10,6.101695007324219,0.6101695007324219,0.0005946945898735176,0.610120330810547,0.6105357299804687,0.6111274536132812,0.6116008325195312,"[0.6117191772460937, 0.6094563598632813, 0.6096676025390625, 0.609661865234375, 0.6102354736328125, 0.6099964599609375, 0.6100880737304688, 0.610152587890625, 0.6103131713867187, 
0.6104042358398437]",tokens/s,419.55554922477825,kWh,7.199752165211573e-06,3.945167113402825e-06,3.557207094129864e-05,4.671699021991304e-05,tokens/kWh,5479805.07294925,MB,1435.398144,8196.194304,0.0,7549.7472,7094.0672,s,10,359.3025859375,35.93025859375,0.004744165567684035,35.92844140625,35.93707578125,35.938278125,35.93924,"[35.92640625, 35.93118359375, 35.93375390625, 35.9282890625, 35.93948046875, 35.93680859375, 35.92859375, 35.92646484375, 35.927859375, 35.92374609375]",tokens/s,1.753396787713594,kWh,0.00042417653058966,0.00023248355862878645,0.0021270168535728984,0.002783676942791345,tokens/kWh,22631.93656977539,,s,629,364.2223344116213,0.5790498162346918,0.0724714604302608,0.570292236328125,0.5706096801757813,0.570741552734375,1.1803278564453126,"[0.570017822265625, 0.5699727172851563, 0.5701652221679687, 0.5702686767578125, 0.5701201782226563, 0.5703833618164063, 0.5702440795898438, 0.5700086059570313, 0.5700679931640625, 0.5700485229492187, 0.5702072143554687, 0.5704335327148438, 0.5702359008789063, 0.5702522583007813, 0.5702430419921874, 0.5702123413085938, 0.5699952392578125, 0.5701222534179687, 0.570018798828125, 0.5700730590820312, 0.5700311279296875, 0.5700730590820312, 0.5705441284179688, 0.5700914916992188, 0.5701908569335937, 0.570039306640625, 0.5701427001953125, 0.5701519165039063, 0.570018798828125, 0.5704693603515625, 0.5701949462890625, 0.5705523071289063, 0.5700341796875, 0.570376220703125, 0.5702338256835937, 0.5700792236328125, 0.5701652221679687, 0.5701939086914063, 0.5703106689453125, 0.5701212158203125, 0.570377197265625, 0.5703854370117187, 0.5702174682617187, 0.57017138671875, 0.5701898193359375, 0.570281982421875, 0.5704048461914063, 0.5703516235351562, 0.5703372802734376, 0.5703587646484375, 0.570261474609375, 0.5706219482421875, 0.570228759765625, 0.5703167724609375, 0.5704724731445312, 0.570250244140625, 0.570260498046875, 0.5705205688476562, 0.5706659545898437, 0.5705738525390625, 0.5702584228515625, 0.5703618774414062, 1.180654541015625, 0.5706793212890625, 0.5700546264648437, 0.5700679931640625, 0.5699635009765625, 0.5702379760742188, 0.5699317626953125, 0.570335205078125, 0.5702041625976563, 0.5703126831054688, 0.5702020874023438, 0.57031884765625, 0.5707837524414062, 0.5704570922851563, 0.5700587768554688, 0.5700219116210937, 0.5701283569335938, 0.5701038208007813, 0.570292236328125, 0.5705799560546875, 0.5701365966796875, 0.5702850341796875, 0.5703014526367187, 0.5703843994140625, 0.5703741455078125, 0.570166259765625, 0.57010791015625, 0.5701795654296875, 0.5702543334960938, 0.5704069213867188, 0.5702564086914063, 0.5703895263671875, 0.5704017944335937, 0.5704888305664062, 0.5703884887695313, 0.5702901611328125, 0.5705441284179688, 0.5706096801757813, 0.5703915405273438, 0.5703915405273438, 0.5706311645507812, 0.5706148071289062, 0.5704232788085938, 0.5703792724609374, 0.5704356079101562, 0.5703280639648437, 0.5702430419921874, 0.5703812866210938, 0.5702645874023438, 0.5704130859375, 0.5704847412109375, 0.5704356079101562, 0.57042431640625, 0.5707622680664063, 0.5705584716796875, 0.5703618774414062, 0.5702379760742188, 0.5702461547851563, 0.5704683227539062, 0.5704161376953125, 0.5703577880859375, 0.5702789306640625, 0.570260498046875, 1.1801353759765625, 0.5700740966796874, 0.569970703125, 0.5700802612304687, 0.5700914916992188, 0.570197021484375, 0.5702625122070313, 0.57080322265625, 0.570260498046875, 0.5700802612304687, 0.5700341796875, 0.5702266845703124, 0.5705256958007813, 0.5703792724609374, 0.5704345703125, 0.570514404296875, 
0.5704652709960938, 0.5702963256835938, 0.5707018432617188, 0.5703864135742187, 0.57019189453125, 0.5703259887695312, 0.5704099731445312, 0.5703536376953126, 0.5701396484375, 0.5705430908203125, 0.5701375732421875, 0.5701621704101563, 0.5701939086914063, 0.5704110107421875, 0.5704468383789062, 0.5702154541015625, 0.5702461547851563, 0.5703546752929688, 0.5702359008789063, 0.57044580078125, 0.5702901611328125, 0.5705093383789063, 0.5704324951171875, 0.5704232788085938, 0.5704314575195313, 0.570387451171875, 0.5705912475585937, 0.5705697021484375, 0.5704417114257813, 0.5703413696289062, 0.5704591064453125, 0.57055126953125, 0.5703720703125, 0.5705902099609375, 0.5705113525390625, 0.5704478759765625, 0.5705379638671875, 0.5709127807617187, 0.570514404296875, 0.5705072631835938, 0.5707048950195313, 0.5702860717773437, 0.5703311157226563, 0.5702573852539062, 0.5707479248046875, 0.570392578125, 0.5701949462890625, 1.1800667724609375, 0.5705154418945313, 0.5700843505859375, 0.5702154541015625, 0.5700464477539062, 0.5703096313476562, 0.570397705078125, 0.570355712890625, 0.5701539916992188, 0.5700249633789063, 0.5699932250976563, 0.5700464477539062, 0.5700894775390625, 0.57014990234375, 0.5704530029296875, 0.5702830200195312, 0.5701437377929688, 0.5704365844726562, 0.5703106689453125, 0.5702205200195313, 0.5703505859375, 0.5701375732421875, 0.5701458129882813, 0.5701437377929688, 0.570576904296875, 0.5702389526367188, 0.5702512817382812, 0.5702799072265625, 0.5704345703125, 0.570302490234375, 0.5701304321289062, 0.5706710815429688, 0.570281982421875, 0.5702225952148438, 0.5702225952148438, 0.5704365844726562, 0.570186767578125, 0.570102783203125, 0.5702564086914063, 0.5701857299804688, 0.57017138671875, 0.5702359008789063, 0.570239990234375, 0.5702092895507812, 0.5701734619140625, 0.5702164306640625, 0.5704601440429687, 0.570261474609375, 0.5702410278320312, 0.5704693603515625, 0.5702573852539062, 0.5701826782226562, 0.5703209228515626, 0.5703567504882813, 0.5702246704101562, 0.57021337890625, 0.5702625122070313, 0.5704263916015625, 0.5704110107421875, 0.5707018432617188, 0.5706383056640625, 0.5707294921875, 0.5706885375976563, 1.1809197998046874, 0.5703690185546875, 0.5707325439453125, 0.5704867553710937, 0.570355712890625, 0.57044482421875, 0.5706455078125, 0.5701949462890625, 0.5700352172851563, 0.5702830200195312, 0.5702727661132813, 0.5703720703125, 0.5707427978515625, 0.5703864135742187, 0.5704652709960938, 0.5703075561523437, 0.5703997192382813, 0.57046630859375, 0.5704171752929688, 0.570387451171875, 0.5707837524414062, 0.5702307739257813, 0.5702277221679688, 0.5705277709960938, 0.5703301391601563, 0.5702706909179688, 0.5701836547851562, 0.5702860717773437, 0.5702338256835937, 0.5703905029296875, 0.5705430908203125, 0.5707396850585937, 0.5704151000976563, 0.570481689453125, 0.5704909057617188, 0.5703987426757813, 0.5704140625, 0.5709854736328125, 0.5703946533203125, 0.5703843994140625, 0.5704007568359375, 0.5707048950195313, 0.5705410766601563, 0.570271728515625, 0.5703301391601563, 0.5703751831054688, 0.5703905029296875, 0.5704356079101562, 0.570440673828125, 0.5706219482421875, 0.570377197265625, 0.5703679809570312, 0.5704591064453125, 0.5703720703125, 0.570461181640625, 0.5709004516601562, 0.5705205688476562, 0.5705410766601563, 0.5705912475585937, 0.570640380859375, 0.5711124267578125, 0.5708656616210938, 0.5706577758789062, 1.1808399658203126, 0.5708021850585937, 0.5704427490234375, 0.5701908569335937, 0.5700781860351563, 0.5701417236328125, 0.5700914916992188, 0.5700802612304687, 
0.5703956298828124, 0.5703792724609374, 0.570260498046875, 0.5703670043945313, 0.570735595703125, 0.5702215576171875, 0.570071044921875, 0.5703117065429687, 0.5704857788085937, 0.5703362426757812, 0.5710018310546875, 0.570376220703125, 0.570514404296875, 0.5702471923828125, 0.5702748413085937, 0.5703157958984375, 0.5701990356445312, 0.5703075561523437, 0.5709547729492187, 0.5701570434570312, 0.5700372314453125, 0.5704427490234375, 0.5703782348632812, 0.5703587646484375, 0.5703833618164063, 0.5702738037109375, 0.57059228515625, 0.5702174682617187, 0.570513427734375, 0.570365966796875, 0.570503173828125, 0.5704345703125, 0.570229736328125, 0.5707151489257812, 0.5703434448242187, 0.5710581665039063, 0.5702789306640625, 0.5702758178710937, 0.57023486328125, 0.5705379638671875, 0.5704007568359375, 0.570302490234375, 0.5705543823242187, 0.5705687255859375, 0.570534912109375, 0.5706015014648438, 0.5708339233398437, 0.5707653198242187, 0.5704918823242188, 0.570392578125, 0.5703731079101563, 0.5703987426757813, 0.5705912475585937, 0.5709179077148437, 0.5703670043945313, 1.1807825927734374, 0.5701509399414062, 0.5701007080078125, 0.5700905151367187, 0.5701478271484375, 0.5702461547851563, 0.5700157470703126, 0.57044580078125, 0.570102783203125, 0.5701734619140625, 0.57012939453125, 0.5702072143554687, 0.5703987426757813, 0.5702000732421875, 0.5708021850585937, 0.57046630859375, 0.570323974609375, 0.5701529541015625, 0.5706157836914062, 0.5701539916992188, 0.5702246704101562, 0.570071044921875, 0.57004541015625, 0.570197998046875, 0.5702573852539062, 0.5706096801757813, 0.570081298828125, 0.5700628662109375, 0.57021337890625, 0.5700740966796874, 0.5700669555664063, 0.5701386108398437, 0.5707857666015625, 0.5702532958984375, 0.5702543334960938, 0.5701437377929688, 0.5706444702148438, 0.570166259765625, 0.570166259765625, 0.5702123413085938, 0.570260498046875, 0.5701417236328125, 0.5706701049804688, 0.5704171752929688, 0.5703782348632812, 0.5703321533203125, 0.5704345703125, 0.5704058837890625, 0.5704703979492187, 0.5707151489257812, 0.5706455078125, 0.5702062377929688, 0.5701334838867187, 0.5703731079101563, 0.5704519653320312, 0.5702543334960938, 0.5702492065429687, 0.5701478271484375, 0.5702041625976563, 0.5701406860351562, 0.5706015014648438, 0.5703741455078125, 0.5703331909179687, 1.1804027099609375, 0.5701099243164063, 0.570060791015625, 0.5707489013671875, 0.570060791015625, 0.5700116577148437, 0.57004541015625, 0.570176513671875, 0.5700474853515625, 0.5701836547851562, 0.57000244140625, 0.570007568359375, 0.57016015625, 0.5703587646484375, 0.5702225952148438, 0.5700638427734375, 0.5701898193359375, 0.5701396484375, 0.5701703491210938, 0.5700423583984375, 0.5705799560546875, 0.5704273681640625, 0.570176513671875, 0.5700433959960938, 0.5704765625, 0.5706629028320312, 0.570208251953125, 0.5702266845703124, 0.5704099731445312, 0.5703434448242187, 0.5707540283203125, 0.5704099731445312, 0.5702256469726562, 0.570092529296875, 0.570076171875, 0.5700700073242188, 0.570123291015625, 0.5702697143554688, 0.5704888305664062, 0.5702543334960938, 0.5701212158203125, 0.5701160888671875, 0.57030859375, 0.5702307739257813, 0.57025537109375, 0.5702860717773437, 0.5701652221679687, 0.5702532958984375, 0.5705809936523437, 0.5704335327148438, 0.5703147583007813, 0.5702205200195313, 0.5703004150390625, 0.5702901611328125, 0.5703424072265625, 0.570323974609375, 0.57051953125, 0.570292236328125, 0.5702225952148438, 0.5703424072265625, 0.5705471801757812, 0.5703259887695312, 0.5703372802734376, 1.180564453125, 
0.570250244140625, 0.5700833129882813, 0.570123291015625, 0.5700126953125, 0.5700361938476562, 0.570144775390625, 0.5701990356445312, 0.57012939453125, 0.570377197265625, 0.5702850341796875, 0.5701099243164063, 0.5703854370117187, 0.5702758178710937, 0.57021337890625, 0.5702860717773437, 0.5702338256835937, 0.57029833984375, 0.5704212646484375, 0.5705707397460937, 0.570218505859375, 0.57014990234375, 0.5701017456054688, 0.5704498901367188, 0.5705891723632812, 0.5702011108398437, 0.5701519165039063, 0.5703117065429687, 0.5702676391601562, 0.57019189453125, 0.570197998046875, 0.5702072143554687, 0.57019189453125, 0.5701068725585937, 0.5702052001953125, 0.5701652221679687, 0.5702225952148438, 0.5702748413085937, 0.5703250122070312, 0.5701754760742187, 0.5702564086914063, 0.57019189453125, 0.5701898193359375, 0.5701693725585938, 0.5705338745117188, 0.5704939575195312, 0.5702215576171875, 0.5701632080078125, 0.5703229370117188, 0.5706946411132813, 0.5702041625976563, 0.5702686767578125, 0.5705799560546875, 0.570397705078125, 0.5707550659179688, 0.5704202270507812, 0.5704949951171875, 0.5702778930664063, 0.5703250122070312, 0.5701703491210938, 0.5702062377929688, 0.5706311645507812, 0.57051953125, 1.1806351318359376, 0.5705595092773438, 0.5698693237304687, 0.5699307250976563, 0.570076171875, 0.5700352172851563, 0.5700628662109375, 0.5699164428710938, 0.5702594604492187, 0.57012939453125, 0.5700106201171875, 0.5700147094726562, 0.570076171875, 0.570018798828125, 0.5700126953125, 0.570271728515625, 0.5703526611328125, 0.5701048583984375, 0.5700587768554688, 0.5703884887695313, 0.570144775390625, 0.569975830078125, 0.5700567016601562, 0.5700003662109375, 0.5700485229492187, 0.5704652709960938, 0.570113037109375, 0.570461181640625, 0.5703075561523437, 0.5700106201171875, 0.5700986938476562, 0.570166259765625, 0.5703301391601563, 0.5704437866210937, 0.5701642456054687, 0.5700781860351563, 0.5700567016601562, 0.5701621704101563, 0.570197998046875, 0.570197021484375, 0.5700966186523437, 0.5701263427734375, 0.570017822265625, 0.5705093383789063, 0.57034033203125, 0.5703987426757813, 0.5703218994140625, 0.5703485717773438, 0.5702573852539062, 0.57031884765625, 0.5704970092773437, 0.570545166015625, 0.5701683349609376, 0.5701437377929688, 0.57034033203125, 0.5705051879882812, 0.5702041625976563, 0.5704263916015625, 0.5704058837890625, 0.5704570922851563, 0.5706854248046875, 0.5703546752929688, 0.5704683227539062]",tokens/s,1.7269671312609398,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-6B,,cuda,0,42,,,,,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,main,False,False,MB,1575.784448,5448.925184,0.0,4802.47808,4489.252352,s,10,5.114569244384766,0.5114569244384766,0.0014045752365005437,0.51150537109375,0.5130841125488281,0.5135362274169921,0.5138979193115234,"[0.512983642578125, 0.5139883422851562, 0.5091934204101562, 0.5101927795410156, 0.5104193725585937, 0.5101881103515625, 0.5112746887207031, 0.5121220092773437, 0.5117360534667968, 
0.5124708251953125]",tokens/s,500.5309103617276,kWh,6.020330099595917e-06,3.2988774182740595e-06,2.7782869448500503e-05,3.710207696637048e-05,tokens/kWh,6899883.266158921,MB,1575.784448,5448.925184,0.0,4802.47808,4557.794816,s,10,301.333625,30.133362499999997,0.011965382718741807,30.1356015625,30.146014453125,30.1513353515625,30.1555920703125,"[30.12017578125, 30.120552734375, 30.1369375, 30.140724609375, 30.15665625, 30.137947265625, 30.123359375, 30.14483203125, 30.118173828125, 30.134265625]",tokens/s,2.0907059409649356,kWh,0.00035569889715148347,0.00019495335105611957,0.001580753417379499,0.002131405665587102,tokens/kWh,29557.958401431977,,s,629,305.4536612854004,0.48561790347440437,0.060780789003776696,0.4781598815917969,0.47936102294921873,0.4797579284667969,0.9888164648437501,"[0.47731610107421873, 0.4777103271484375, 0.4780533752441406, 0.4779223022460938, 0.47746868896484373, 0.47739285278320315, 0.4777676696777344, 0.47874050903320314, 0.47792022705078124, 0.4776212463378906, 0.47766937255859376, 0.4774625244140625, 0.4779018249511719, 0.47790591430664064, 0.47894528198242187, 0.47799398803710935, 0.47878759765625, 0.47878964233398436, 0.47852645874023436, 0.47817422485351563, 0.4778905334472656, 0.47780557250976563, 0.47809637451171877, 0.47767245483398435, 0.477765625, 0.4778649597167969, 0.4779622497558594, 0.47779531860351565, 0.47874456787109376, 0.47817214965820315, 0.4782950439453125, 0.47745944213867186, 0.47759051513671874, 0.4785008544921875, 0.47783013916015626, 0.4780267639160156, 0.4782438354492187, 0.4785858459472656, 0.4793190307617187, 0.47905487060546875, 0.47873635864257813, 0.47829400634765623, 0.4777697143554688, 0.4778240661621094, 0.47742047119140624, 0.4780851135253906, 0.4782008361816406, 0.47801651000976564, 0.47831243896484377, 0.47876199340820313, 0.4778547058105469, 0.47859405517578124, 0.4782591857910156, 0.47776461791992186, 0.47824075317382814, 0.4776304626464844, 0.4778383483886719, 0.4776642456054688, 0.47760385131835936, 0.478160888671875, 0.47876300048828124, 0.4783943786621094, 0.9927915649414063, 0.47756494140625, 0.4774000549316406, 0.47765298461914063, 0.47747378540039065, 0.4782704772949219, 0.477907958984375, 0.4783944091796875, 0.477813720703125, 0.47758950805664063, 0.47759051513671874, 0.4776069030761719, 0.47729867553710936, 0.4773724060058594, 0.47751473999023436, 0.47745742797851565, 0.4780113525390625, 0.4786903076171875, 0.47860427856445314, 0.4783482971191406, 0.47794790649414065, 0.47776461791992186, 0.4778291320800781, 0.4779427795410156, 0.4779376525878906, 0.47769189453125, 0.47908352661132814, 0.47773797607421875, 0.47901287841796875, 0.4777134094238281, 0.4779151306152344, 0.47777587890625, 0.47748709106445314, 0.47771136474609377, 0.4781527099609375, 0.47798886108398436, 0.47750042724609376, 0.47809127807617186, 0.47905892944335937, 0.4782591857910156, 0.4781598815917969, 0.4782981262207031, 0.4776468505859375, 0.47786392211914064, 0.4776560668945313, 0.478023681640625, 0.48116427612304685, 0.4793456726074219, 0.4777492370605469, 0.47873126220703127, 0.47907122802734375, 0.4783626098632813, 0.4779949951171875, 0.47790286254882813, 0.4776908874511719, 0.478065673828125, 0.4777738342285156, 0.477770751953125, 0.47832369995117185, 0.47939175415039065, 0.47875686645507814, 0.47773284912109376, 0.478023681640625, 0.988906494140625, 0.47783013916015626, 0.4776437683105469, 0.4778649597167969, 0.4783902587890625, 0.47753521728515624, 0.4795494384765625, 0.47817214965820315, 0.4789770202636719, 0.4776386413574219, 
0.47787213134765627, 0.47758950805664063, 0.47774514770507814, 0.47762841796875, 0.47790386962890624, 0.479541259765625, 0.4791797790527344, 0.47914700317382813, 0.4779294738769531, 0.4777625732421875, 0.47816497802734376, 0.4782356567382812, 0.47807180786132814, 0.4797726745605469, 0.4777687072753906, 0.4803246154785156, 0.4800747375488281, 0.4793231506347656, 0.4796539001464844, 0.48039935302734377, 0.4796252136230469, 0.4790927429199219, 0.477655029296875, 0.4791510925292969, 0.4787394409179688, 0.47877017211914064, 0.47938558959960936, 0.47959552001953126, 0.47838516235351564, 0.47773593139648435, 0.478129150390625, 0.4781404113769531, 0.4776857604980469, 0.4779018249511719, 0.47747378540039065, 0.4776847229003906, 0.4781803588867187, 0.4799620971679687, 0.4786053161621094, 0.47794790649414065, 0.4777123718261719, 0.4776683654785156, 0.4774696960449219, 0.47712460327148437, 0.4774481811523438, 0.47752908325195315, 0.47732632446289064, 0.4782438354492187, 0.47871487426757814, 0.47837286376953125, 0.4777123718261719, 0.47754238891601564, 0.4779929504394531, 0.9885849609375, 0.47834625244140627, 0.47808615112304687, 0.47836468505859375, 0.47836468505859375, 0.4776908874511719, 0.47762738037109376, 0.4779254455566406, 0.477751220703125, 0.4784117736816406, 0.47790286254882813, 0.4781741943359375, 0.4787138671875, 0.47869439697265626, 0.47841998291015625, 0.4790876159667969, 0.47936920166015623, 0.4789770202636719, 0.4778157958984375, 0.47849676513671874, 0.4780349426269531, 0.4785776672363281, 0.4781475830078125, 0.4785581970214844, 0.4792135620117188, 0.47874969482421875, 0.4787026062011719, 0.47837899780273435, 0.47872308349609377, 0.47839334106445314, 0.4786411437988281, 0.4790169677734375, 0.47768063354492185, 0.4778465270996094, 0.4781793212890625, 0.47906610107421876, 0.47883367919921876, 0.4782213134765625, 0.4785172424316406, 0.47826739501953125, 0.478087158203125, 0.4777277526855469, 0.4778680419921875, 0.47746456909179685, 0.4802908020019531, 0.47867086791992186, 0.47896060180664063, 0.4792279052734375, 0.4781783142089844, 0.47810867309570315, 0.4786063232421875, 0.4785162353515625, 0.4785848388671875, 0.4778659973144531, 0.4776212463378906, 0.4777840576171875, 0.4788787231445312, 0.47973171997070313, 0.47971328735351565, 0.4792842102050781, 0.477655029296875, 0.47770315551757814, 0.4779049377441406, 0.9883463134765625, 0.4791357421875, 0.4782458801269531, 0.4794173583984375, 0.478166015625, 0.47800833129882814, 0.4775157775878906, 0.4780421142578125, 0.47852032470703126, 0.4791224365234375, 0.47801651000976564, 0.47799398803710935, 0.4782530517578125, 0.47967129516601564, 0.47834417724609374, 0.4779847717285156, 0.4790302734375, 0.47956378173828124, 0.4773304443359375, 0.4774420471191406, 0.4774143981933594, 0.4773447570800781, 0.47814349365234377, 0.47828582763671873, 0.479710205078125, 0.4788910217285156, 0.479056884765625, 0.47832369995117185, 0.4782438354492187, 0.4783964233398437, 0.479025146484375, 0.4786606140136719, 0.4782909545898438, 0.47821823120117185, 0.4802129821777344, 0.47790286254882813, 0.47835751342773436, 0.4782561340332031, 0.478635009765625, 0.47870156860351565, 0.4777001037597656, 0.4775802917480469, 0.47765914916992186, 0.479578125, 0.4784527282714844, 0.47922894287109374, 0.47923199462890625, 0.4788213806152344, 0.47869439697265626, 0.4787804260253906, 0.47939993286132815, 0.47935186767578125, 0.4787639770507813, 0.4786268310546875, 0.4793589782714844, 0.48041677856445314, 0.48034817504882815, 0.482777099609375, 0.4785745849609375, 0.478571533203125, 
0.47958123779296874, 0.47865029907226564, 0.4787384338378906, 0.990581787109375, 0.4781793212890625, 0.4788521423339844, 0.47797039794921875, 0.47851007080078123, 0.47790286254882813, 0.4778321838378906, 0.47813427734375, 0.4789043273925781, 0.47835134887695313, 0.47826226806640626, 0.47895858764648436, 0.4790947875976562, 0.4783759460449219, 0.47830938720703126, 0.4781803588867187, 0.477991943359375, 0.47756903076171875, 0.47764480590820313, 0.47726080322265624, 0.47773797607421875, 0.47950848388671874, 0.47766015625, 0.47899853515625, 0.47830322265625, 0.4781537170410156, 0.47825204467773436, 0.4785592346191406, 0.479025146484375, 0.47944705200195314, 0.47835134887695313, 0.478445556640625, 0.47876199340820313, 0.47891659545898435, 0.4791101379394531, 0.47820391845703125, 0.4782581787109375, 0.47862374877929686, 0.4814622802734375, 0.47808819580078127, 0.47835751342773436, 0.47878964233398436, 0.4785551452636719, 0.4777082824707031, 0.4787271728515625, 0.4784322509765625, 0.47835546875, 0.4775679931640625, 0.4779346008300781, 0.47834521484375, 0.478160888671875, 0.47767962646484374, 0.47781170654296873, 0.4783585205078125, 0.4791285705566406, 0.4783697814941406, 0.47800216674804685, 0.47763250732421875, 0.4782294921875, 0.47773696899414064, 0.47801651000976564, 0.4774718017578125, 0.47802670288085936, 0.9896888427734375, 0.47829608154296877, 0.47802975463867187, 0.47756497192382813, 0.47750039672851563, 0.47811380004882814, 0.4781025390625, 0.4778936767578125, 0.47798370361328124, 0.47788851928710935, 0.4786268310546875, 0.4782899169921875, 0.4779632568359375, 0.47777484130859377, 0.47753521728515624, 0.4779007873535156, 0.47758544921875, 0.47753726196289065, 0.47787213134765627, 0.47847628784179685, 0.4782909545898438, 0.4778014831542969, 0.47860223388671874, 0.47986483764648435, 0.47783935546875, 0.47765298461914063, 0.4779980773925781, 0.47794073486328126, 0.4787517395019531, 0.4781506652832031, 0.4780155029296875, 0.47900875854492186, 0.47818548583984377, 0.47805029296875, 0.4782233581542969, 0.47802471923828127, 0.4779346008300781, 0.47827557373046875, 0.478497802734375, 0.47849063110351564, 0.47899237060546873, 0.4786196594238281, 0.47815167236328127, 0.4799846801757813, 0.47795709228515626, 0.47788851928710935, 0.4778270568847656, 0.4778547058105469, 0.478060546875, 0.47782608032226564, 0.4778434143066406, 0.477601806640625, 0.4788572082519531, 0.47828274536132814, 0.47832986450195314, 0.4779417724609375, 0.477633544921875, 0.477907958984375, 0.478065673828125, 0.4780482482910156, 0.47808819580078127, 0.47858380126953126, 0.47811892700195313, 0.9911572265625, 0.4780707702636719, 0.47788134765625, 0.4775536499023437, 0.4774696960449219, 0.47756185913085936, 0.47758642578125, 0.4777615356445313, 0.47794790649414065, 0.4787118225097656, 0.47779736328125, 0.47749325561523437, 0.4775475158691406, 0.4777697143554688, 0.4781363220214844, 0.4779141235351563, 0.4792012939453125, 0.4778874816894531, 0.47856845092773437, 0.47768267822265625, 0.4781311950683594, 0.4785070190429688, 0.47809127807617186, 0.4776365966796875, 0.4780707702636719, 0.47816497802734376, 0.47828582763671873, 0.47777279663085936, 0.4780728454589844, 0.47835751342773436, 0.47851211547851563, 0.47822235107421873, 0.4775055236816406, 0.4781465454101563, 0.47790386962890624, 0.4779018249511719, 0.478359619140625, 0.4794295654296875, 0.4784394226074219, 0.4795248718261719, 0.4780544128417969, 0.477949951171875, 0.48094821166992185, 0.4794306640625, 0.4784814147949219, 0.4792197265625, 0.4782981262207031, 
0.47831655883789065, 0.47831243896484377, 0.4784998474121094, 0.47935488891601563, 0.47914599609375, 0.48005630493164064, 0.48013311767578126, 0.47997030639648436, 0.48005426025390624, 0.4799140625, 0.4794142150878906, 0.4800153503417969, 0.4793067626953125, 0.47961505126953125, 0.47790789794921873, 0.4778670043945312, 0.9900185546875, 0.4775372924804687, 0.47779840087890624, 0.477633544921875, 0.4782950439453125, 0.47761920166015626, 0.47800421142578126, 0.478803955078125, 0.4791029663085937, 0.4782643127441406, 0.47842098999023436, 0.4776908874511719, 0.4780185546875, 0.47770932006835937, 0.4777851257324219, 0.477681640625, 0.477812744140625, 0.47979006958007814, 0.47791717529296873, 0.4780451965332031, 0.4788193359375, 0.477955078125, 0.47790591430664064, 0.47803903198242187, 0.47824697875976563, 0.4786861572265625, 0.47816192626953125, 0.477655029296875, 0.47835134887695313, 0.478919677734375, 0.47831039428710936, 0.4777205810546875, 0.4773253173828125, 0.47773284912109376, 0.4778260498046875, 0.4776069030761719, 0.47773492431640624, 0.4777461853027344, 0.478587890625, 0.47767041015625, 0.4789801330566406, 0.47915618896484374, 0.4780257263183594, 0.47762841796875, 0.4778168334960938, 0.4776714172363281, 0.47783526611328125, 0.47757107543945315, 0.4779704284667969, 0.4786677856445313, 0.47908148193359373, 0.4783206481933594, 0.4781240234375, 0.47756494140625, 0.4773294067382812, 0.47757208251953126, 0.477348876953125, 0.47757720947265625, 0.47773492431640624, 0.47816192626953125, 0.47770932006835937, 0.4789381103515625, 0.47819366455078127, 0.9904609375, 0.47848550415039065, 0.478013427734375, 0.4775751647949219, 0.4774912109375, 0.4782847900390625, 0.4794931335449219, 0.4796549072265625, 0.47851318359375, 0.4785254211425781, 0.47859506225585935, 0.4788695068359375, 0.47816293334960935, 0.47827969360351563, 0.4785244140625, 0.4797358093261719, 0.4782438354492187, 0.4776570739746094, 0.4789125061035156, 0.47882650756835937, 0.4777082824707031, 0.47753216552734373, 0.47809637451171877, 0.47831655883789065, 0.4776488952636719, 0.4774471740722656, 0.4796170349121094, 0.47986483764648435, 0.4793456726074219, 0.479599609375, 0.4791654357910156, 0.4776488952636719, 0.4775475158691406, 0.47792538452148436, 0.4778240051269531, 0.4774819946289062, 0.4782612915039062, 0.4777738037109375, 0.4777543640136719, 0.47963134765625, 0.47828070068359374, 0.4778486022949219, 0.4777625427246094, 0.477601806640625, 0.4777185363769531, 0.47762432861328125, 0.47770932006835937, 0.4777062377929687, 0.47948800659179686, 0.4784066467285156, 0.47847015380859376, 0.4775106506347656, 0.4777420654296875, 0.47842098999023436, 0.47805233764648436, 0.47766015625, 0.47758746337890623, 0.47821926879882815, 0.4781087036132812, 0.4776806640625, 0.47913876342773437, 0.47851828002929686, 0.4783575744628906]",tokens/s,2.059232151132392,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-14B,Qwen/Qwen1.5-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA 
A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,3951.190016,12732.33408,0.0,12085.886976,11337.496064,s,10,10.983638916015623,1.0983638916015628,0.002145812483257245,1.0982380981445312,1.1009110717773436,1.1018160705566407,1.1025400695800782,"[1.1027210693359375, 1.1007099609375, 1.0960345458984375, 1.0957255859375, 1.0965640869140625, 1.096655029296875, 1.0976143798828124, 1.09886181640625, 1.0996507568359375, 1.0991016845703125]",tokens/s,233.07394021003134,kWh,1.2942184060811996e-05,7.0910633564744784e-06,6.339727294000231e-05,8.343052035728877e-05,tokens/kWh,3068421.4709879244,MB,3951.190016,12732.33408,0.0,12085.886976,11686.800384,s,10,637.80165234375,63.780165234375,0.009100497531913991,63.777451171875,63.784855859375,63.7953341796875,63.8037168359375,"[63.775953125, 63.7747265625, 63.78252734375, 63.77445703125, 63.77937109375, 63.7818203125, 63.8058125, 63.7783125, 63.77208203125, 63.77658984375]",tokens/s,0.9877679019565393,kWh,0.0007529117455250688,0.00041266306092793453,0.003710265523765599,0.004875840330218602,tokens/kWh,12920.849686063342,,s,629,646.6700090332033,1.028092224218129,0.13044395391192767,1.0122936401367189,1.0129029296874998,1.0132424438476564,2.1090928125,"[1.0118082275390625, 1.0120591430664063, 1.0121963500976563, 1.0122721557617187, 1.0119987182617187, 1.0119086303710938, 1.0127564697265625, 1.0119772338867188, 1.013127197265625, 1.0117877807617188, 1.0121246948242189, 1.0118850708007812, 1.0127083740234375, 1.0122250366210936, 1.0122311401367188, 1.01222705078125, 1.0121298217773438, 1.01207958984375, 1.0122537231445312, 1.0121769409179688, 1.0120580444335938, 1.0120304565429687, 1.0126674194335938, 1.0122168579101563, 1.0119823608398437, 1.0118645629882812, 1.0125404052734375, 1.0122362670898437, 1.01228955078125, 1.012083740234375, 1.0118092651367188, 1.0122362670898437, 1.0126837768554688, 1.0127236938476563, 1.0127257690429687, 1.0131476440429688, 1.0123878784179687, 1.0122587280273438, 1.0124882202148437, 1.0123243408203124, 1.0121123657226563, 1.0124574584960937, 1.012696044921875, 1.0123038940429687, 1.012063232421875, 1.01218408203125, 1.0126653442382811, 1.012890625, 1.0124677124023438, 1.0129090576171875, 1.0120939331054688, 1.01304931640625, 1.0123591918945312, 1.0120345458984374, 1.0119608154296875, 1.0121820068359375, 1.0124882202148437, 1.0120724487304686, 1.01193115234375, 1.0123868408203125, 1.0123858642578125, 1.0128014526367188, 2.113512451171875, 1.0119444580078125, 1.0120970458984375, 1.0116249389648437, 1.0117980346679687, 1.0118410034179688, 1.0118922119140625, 1.0117406616210938, 1.012063232421875, 1.0121236572265624, 1.0118225708007813, 1.0117457885742187, 1.012200439453125, 1.0118594360351563, 1.0123018188476562, 1.0119075927734376, 1.0119393310546876, 1.0118450927734375, 1.0121697387695312, 1.0119772338867188, 1.0117940063476563, 1.0117733764648438, 1.0121502685546875, 1.0117877807617188, 1.0121226196289062, 1.0117345581054686, 1.011778564453125, 1.0116874389648438, 1.0122762451171874, 1.0118328247070312, 1.0120519409179687, 1.0118645629882812, 1.0123099975585939, 1.0122506103515625, 1.012801513671875, 1.0120089721679688, 1.0126571655273438, 1.0122014770507812, 1.0129817504882812, 1.0126458740234374, 1.012490234375, 1.0121615600585938, 1.0127646484375, 1.0119393310546876, 1.0128711547851563, 1.011999755859375, 1.0165934448242187, 1.0120037841796874, 1.0129735717773438, 1.0124287719726563, 1.0122291259765626, 1.0124595336914062, 1.01273291015625, 1.0122619018554688, 1.0127390747070313, 1.0128394165039063, 
1.0125383911132813, 1.01269091796875, 1.013369873046875, 1.0124830932617188, 1.0126510009765626, 1.0123509521484375, 1.0127933349609375, 2.1089033203125, 1.0117857055664063, 1.0120253295898438, 1.012158447265625, 1.0124718017578125, 1.0120816650390625, 1.0120447998046875, 1.0125680541992188, 1.0123571166992187, 1.0120724487304686, 1.0127575073242188, 1.01252001953125, 1.0122987060546875, 1.0124451904296874, 1.0127728881835938, 1.011794921875, 1.011989501953125, 1.0122485961914063, 1.0122066040039062, 1.0119823608398437, 1.012210693359375, 1.0119772338867188, 1.01199462890625, 1.0123724975585937, 1.0123673706054688, 1.0120867919921874, 1.0119280395507813, 1.0121994018554688, 1.0121431274414063, 1.0121226196289062, 1.0124666748046875, 1.0119382934570313, 1.012031494140625, 1.0129346313476562, 1.012621337890625, 1.0127984619140624, 1.0124257202148437, 1.012552734375, 1.0124932861328124, 1.0126151733398439, 1.0124769287109374, 1.0124185791015625, 1.0123202514648437, 1.0127390747070313, 1.0145853271484375, 1.0121594848632813, 1.0121994018554688, 1.016111083984375, 1.01210009765625, 1.012337646484375, 1.0126919555664062, 1.0125444946289062, 1.0131015625, 1.0132469482421875, 1.0124186401367188, 1.0119146728515624, 1.0125578002929687, 1.0123294677734376, 1.0121595458984376, 1.0121768798828126, 1.0121963500976563, 1.0120345458984374, 1.0120714111328124, 2.108083251953125, 1.0117611694335937, 1.0117723999023438, 1.0123253784179687, 1.0121656494140625, 1.011751953125, 1.011820556640625, 1.0120601806640626, 1.0119454956054688, 1.01188916015625, 1.0119761962890625, 1.0117877807617188, 1.0130258178710938, 1.0123018188476562, 1.0121328735351562, 1.01246875, 1.0121953125, 1.0118369140625, 1.0122199096679687, 1.0124267578125, 1.0122546997070312, 1.0124503173828125, 1.0120703735351562, 1.0122537231445312, 1.0119536743164061, 1.0123397216796874, 1.0123960571289063, 1.0120325317382812, 1.0122772216796876, 1.012115478515625, 1.012005859375, 1.011937255859375, 1.0124103393554686, 1.0128527221679688, 1.012220947265625, 1.0126182250976563, 1.0130596313476563, 1.012464599609375, 1.0132828369140625, 1.0123468627929688, 1.0119547119140626, 1.0120048828125, 1.0124175415039063, 1.012010986328125, 1.0151219482421876, 1.0123069458007812, 1.0124124145507813, 1.0122076416015624, 1.0128138427734374, 1.0126878662109375, 1.0123939819335936, 1.0121318359375, 1.01235302734375, 1.011895263671875, 1.0121062622070311, 1.0122885131835937, 1.0123724975585937, 1.012242431640625, 1.012421630859375, 1.0123786010742188, 1.0121113891601563, 1.0120017700195312, 1.0122393798828124, 2.10916650390625, 1.0120038452148437, 1.0127247314453125, 1.0125916137695312, 1.0125660400390626, 1.0119761962890625, 1.0121748657226564, 1.0119721069335939, 1.0121370239257812, 1.0121942138671876, 1.0123048706054687, 1.0119403686523438, 1.0117099609375, 1.01187890625, 1.0121195678710937, 1.0117509155273436, 1.0118615112304687, 1.013433349609375, 1.012052978515625, 1.0117959594726562, 1.0120274047851563, 1.0121123657226563, 1.012421630859375, 1.012052978515625, 1.0121298217773438, 1.0119987182617187, 1.0118973388671875, 1.0125066528320312, 1.0120836791992187, 1.0121307983398438, 1.01264697265625, 1.0120714111328124, 1.0120857543945312, 1.0123540649414062, 1.0124544067382812, 1.0121564331054687, 1.0122383422851562, 1.0122454833984376, 1.0119935913085938, 1.0119239501953126, 1.0125363159179688, 1.0117755126953125, 1.011968017578125, 1.0119495239257812, 1.0124840698242188, 1.0122025146484375, 1.0122854614257812, 1.01258447265625, 1.0125885620117188, 
1.0125301513671876, 1.012516845703125, 1.012400146484375, 1.0123970336914063, 1.0127892456054688, 1.01247998046875, 1.01256396484375, 1.0124237060546875, 1.0165504150390625, 1.0126458740234374, 1.0126848754882813, 1.0138418579101562, 1.0126428833007812, 1.0132418212890626, 2.10956298828125, 1.0119423828125, 1.0119608154296875, 1.0120847778320312, 1.011984375, 1.0120007934570312, 1.0124031982421875, 1.01182568359375, 1.0117908325195313, 1.0122680053710937, 1.012263916015625, 1.0119035034179686, 1.0124677734375, 1.0120057983398438, 1.0118276977539062, 1.011904541015625, 1.0125209350585938, 1.012252685546875, 1.0120653076171875, 1.0124205932617187, 1.0122495727539063, 1.0123724975585937, 1.0126407470703125, 1.012590576171875, 1.0119659423828125, 1.0125209350585938, 1.0127493286132812, 1.0124779663085937, 1.0127708129882813, 1.0121123657226563, 1.0121646118164063, 1.0116229248046875, 1.0122229614257812, 1.0120335083007812, 1.0120325317382812, 1.0120078735351563, 1.0126233520507812, 1.0124185791015625, 1.01650634765625, 1.0135787353515624, 1.012727783203125, 1.0122936401367189, 1.0128947143554687, 1.0121277465820313, 1.012600830078125, 1.0126612548828124, 1.0123960571289063, 1.012105224609375, 1.0127769775390625, 1.01275341796875, 1.012274169921875, 1.012279296875, 1.0123386840820312, 1.0121942749023438, 1.0125885620117188, 1.012947998046875, 1.0124779663085937, 1.0123930053710937, 1.0128373413085938, 1.0122721557617187, 1.01233251953125, 1.0123386840820312, 1.0127564697265625, 2.110841796875, 1.0128209838867188, 1.0126233520507812, 1.012595703125, 1.0131865844726562, 1.01278515625, 1.0128342895507811, 1.0131220703125, 1.0128527221679688, 1.012337646484375, 1.0124810180664063, 1.0129080200195313, 1.012747314453125, 1.0124318237304688, 1.0134036254882812, 1.0129408569335938, 1.0123612060546876, 1.0120929565429688, 1.0134886474609375, 1.0132265014648438, 1.0129080200195313, 1.0132152099609375, 1.0127984619140624, 1.0145208129882812, 1.013728271484375, 1.0132162475585937, 1.0127083740234375, 1.0127789916992187, 1.012833251953125, 1.0120120239257813, 1.0122465209960938, 1.0125762329101562, 1.0127605590820312, 1.0129448852539062, 1.01275341796875, 1.0117826538085937, 1.0119331665039062, 1.0120591430664063, 1.0131445922851563, 1.0131220703125, 1.0128814086914062, 1.0128875732421876, 1.0127513427734376, 1.013012451171875, 1.0131896362304686, 1.0130308837890625, 1.0130473022460937, 1.0133401489257812, 1.0132930297851563, 1.0126981201171874, 1.0128568115234375, 1.012906982421875, 1.0124503173828125, 1.0124984130859376, 1.0132520751953125, 1.0128076782226563, 1.0122936401367189, 1.0126827392578126, 1.0122035522460937, 1.0120509033203124, 1.0123540649414062, 1.0125946655273437, 1.0123171997070313, 2.112021484375, 1.0123406982421874, 1.0120253295898438, 1.0121513061523437, 1.0124144897460938, 1.012274169921875, 1.012401123046875, 1.0119710693359374, 1.012263916015625, 1.0128281860351562, 1.0122537231445312, 1.0122373046875, 1.0121615600585938, 1.0122373046875, 1.0120325317382812, 1.0120929565429688, 1.01382861328125, 1.0119669799804687, 1.0119239501953126, 1.0123489379882813, 1.0119403686523438, 1.0117539672851563, 1.0119976806640625, 1.0121533203125, 1.0122137451171875, 1.0119065551757813, 1.0129019165039062, 1.0124779052734374, 1.0123939819335936, 1.0126571655273438, 1.0125834350585938, 1.0119721069335939, 1.0120038452148437, 1.012474853515625, 1.0120499267578125, 1.0122383422851562, 1.0124830932617188, 1.0124758911132812, 1.0127083740234375, 1.012632568359375, 1.0122034912109374, 1.0120222778320314, 
1.012178955078125, 1.0122977294921875, 1.0122034912109374, 1.01212158203125, 1.0132428588867188, 1.0123274536132814, 1.0125435180664062, 1.0123755493164062, 1.0122383422851562, 1.01226904296875, 1.0125250854492187, 1.0119833374023437, 1.0120601806640626, 1.0121134033203125, 1.012974609375, 1.0124656372070313, 1.01243798828125, 1.012580322265625, 1.0127124633789062, 1.0127247314453125, 1.0126704711914063, 2.11262353515625, 1.0118778686523437, 1.0124564208984375, 1.0118143920898437, 1.0125772705078124, 1.0121441040039063, 1.0118225708007813, 1.0119639892578125, 1.0122833251953125, 1.011894287109375, 1.0119280395507813, 1.0119198608398436, 1.0127401123046875, 1.01235205078125, 1.012570068359375, 1.011726318359375, 1.0117744750976563, 1.013375, 1.0122465209960938, 1.01218505859375, 1.0120325317382812, 1.011989501953125, 1.0127339477539063, 1.0121031494140624, 1.0123878173828125, 1.0119874267578124, 1.011962890625, 1.0119680786132812, 1.0123140258789063, 1.0120407104492188, 1.012220947265625, 1.0121410522460939, 1.0122772216796876, 1.011979248046875, 1.0125343017578126, 1.01197412109375, 1.01228955078125, 1.0118973388671875, 1.0122465209960938, 1.0119423828125, 1.012041748046875, 1.0123131103515626, 1.0124677124023438, 1.0119342041015624, 1.012621337890625, 1.012105224609375, 1.0122291259765626, 1.0119669799804687, 1.0125332641601563, 1.0122034912109374, 1.0122045288085937, 1.0123519897460938, 1.012875244140625, 1.0126397705078125, 1.0126233520507812, 1.0123182373046875, 1.0127708129882813, 1.0124646606445313, 1.0125987548828126, 1.0120775756835938, 1.012442138671875, 1.012552734375, 1.012252685546875, 2.11210546875, 1.011620849609375, 1.0120519409179687, 1.0119721069335939, 1.0122485961914063, 1.0120714111328124, 1.0120621948242188, 1.0127001342773438, 1.0125178833007813, 1.012548583984375, 1.01243701171875, 1.0118604736328125, 1.0117130126953124, 1.0120304565429687, 1.0122587890625, 1.01167822265625, 1.0118010864257811, 1.0119813232421875, 1.011894287109375, 1.0122905883789062, 1.01184716796875, 1.0117959594726562, 1.0116761474609375, 1.0118410034179688, 1.0117744750976563, 1.0119515991210937, 1.0144215087890625, 1.0122987670898438, 1.012021240234375, 1.0126315307617189, 1.0124862060546875, 1.0122158203125, 1.0121727905273437, 1.0125875244140625, 1.0124472045898438, 1.0122403564453124, 1.0122117309570313, 1.0127349853515626, 1.0121431274414063, 1.012495361328125, 1.012738037109375, 1.0124758911132812, 1.0128773193359375, 1.0123079833984374, 1.0123099975585939, 1.0121298217773438, 1.0122557373046874, 1.0126878662109375, 1.0124810180664063, 1.013000244140625, 1.012516845703125, 1.0124298095703126, 1.012358154296875, 1.0124656372070313, 1.0123171997070313, 1.01239501953125, 1.01268994140625, 1.0127656860351562, 1.01236328125, 1.0124308471679688, 1.012527099609375, 1.0123038940429687, 1.0129132080078125]",tokens/s,0.972675384993313,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm2-20b,internlm/internlm2-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA 
A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,4144.504832,15760.621568,0.0,15114.174464,14045.205504,s,10,15.963228881835938,1.5963228881835938,0.0014829914244840832,1.5958744506835938,1.5984191040039064,1.5985000183105469,1.5985647497558595,"[1.595896728515625, 1.598401123046875, 1.5945810546875, 1.5952257080078125, 1.5950601806640625, 1.5945120849609375, 1.5958521728515624, 1.5975928955078125, 1.5975260009765626, 1.5985809326171876]",tokens/s,160.36855820021125,kWh,1.8825581471125285e-05,1.0316417645935872e-05,8.981001629239093e-05,0.00011895201540945208,tokens/kWh,2152128.310889114,MB,4144.504832,15760.621568,0.0,15114.174464,14169.857024,s,10,928.8058671874999,92.88058671875,0.004527086516632578,92.879125,92.88628203124999,92.886953515625,92.887490703125,"[92.8758671875, 92.88065625, 92.887625, 92.8848203125, 92.8846328125, 92.875515625, 92.87759375, 92.8768203125, 92.876203125, 92.8861328125]",tokens/s,0.6782902889144009,kWh,0.0010964920105205644,0.0006009745227833628,0.005288325397323612,0.006985791930627541,tokens/kWh,9018.304671198623,,s,629,941.6992608642577,1.4971371396888042,0.1896008283998211,1.4742733154296874,1.4748571533203125,1.4750364013671875,3.06951080078125,"[1.473944580078125, 1.47329638671875, 1.4738431396484375, 1.47358203125, 1.47386572265625, 1.4748426513671875, 1.47376123046875, 1.4739620361328125, 1.47388720703125, 1.47437158203125, 1.474060302734375, 1.4738883056640626, 1.4746326904296876, 1.473596435546875, 1.4739219970703126, 1.474044921875, 1.474198486328125, 1.4741043701171874, 1.4745589599609374, 1.4744586181640624, 1.4741165771484375, 1.4748856201171876, 1.474040771484375, 1.473933349609375, 1.474250732421875, 1.4747626953125, 1.4741483154296875, 1.4741063232421876, 1.47391796875, 1.47371826171875, 1.4743121337890626, 1.4736240234375, 1.474345947265625, 1.4741422119140626, 1.47420263671875, 1.473943603515625, 1.474134033203125, 1.474208740234375, 1.473671142578125, 1.474809814453125, 1.473850341796875, 1.47422314453125, 1.4740899658203126, 1.4746153564453126, 1.474150390625, 1.474164794921875, 1.474740234375, 1.4742220458984374, 1.474397216796875, 1.47470947265625, 1.4741094970703126, 1.474333740234375, 1.4749337158203124, 1.4750013427734374, 1.4743634033203126, 1.47471875, 1.4743265380859376, 1.4740859375, 1.4744422607421874, 1.4750106201171875, 1.4745364990234375, 1.4744473876953126, 3.072501708984375, 1.4742149658203125, 1.47382275390625, 1.4743377685546875, 1.4739732666015626, 1.4742138671875, 1.473607666015625, 1.473517578125, 1.4735635986328126, 1.4740213623046876, 1.47407568359375, 1.474303955078125, 1.474218017578125, 1.4742906494140624, 1.4739681396484374, 1.4741094970703126, 1.47432958984375, 1.47487744140625, 1.474449462890625, 1.474229248046875, 1.474135009765625, 1.474888671875, 1.4737879638671876, 1.4744893798828125, 1.4746142578125, 1.474745361328125, 1.4743009033203125, 1.4740203857421874, 1.4740869140625, 1.4739189453125, 1.474797607421875, 1.4739200439453124, 1.474556884765625, 1.4741739501953126, 1.4747054443359375, 1.4741309814453125, 1.4740633544921875, 1.4746910400390625, 1.4741361083984375, 1.4743233642578124, 1.474419677734375, 1.4740213623046876, 1.47390673828125, 1.4743306884765626, 1.4746715087890625, 1.474198486328125, 1.474620361328125, 1.47437255859375, 1.4746910400390625, 1.474514892578125, 1.4745426025390624, 1.47401220703125, 1.4740531005859374, 1.47437158203125, 1.475441650390625, 1.4746439208984374, 1.474841552734375, 1.47451904296875, 1.474598876953125, 1.474634765625, 1.4747381591796875, 
1.4739384765625, 1.473933349609375, 3.0693232421875, 1.473892333984375, 1.474171875, 1.4738431396484375, 1.474007080078125, 1.4742384033203124, 1.47473095703125, 1.4749234619140625, 1.473754150390625, 1.4738544921875, 1.474039794921875, 1.47367529296875, 1.47413916015625, 1.47485693359375, 1.4745743408203125, 1.474783203125, 1.475303466796875, 1.474924560546875, 1.4741279296875, 1.4751273193359375, 1.4750792236328125, 1.474249755859375, 1.4747965087890624, 1.474628662109375, 1.4746798095703124, 1.47464599609375, 1.475072021484375, 1.4749490966796874, 1.474650146484375, 1.474839599609375, 1.4745999755859376, 1.47433984375, 1.474587646484375, 1.4739671630859374, 1.47449853515625, 1.47369580078125, 1.4738216552734376, 1.4743746337890624, 1.474156494140625, 1.47430712890625, 1.4742518310546875, 1.474651123046875, 1.4738739013671875, 1.474051025390625, 1.4744114990234376, 1.474198486328125, 1.474423828125, 1.4742803955078125, 1.4740162353515625, 1.473574951171875, 1.4743848876953125, 1.4738145751953124, 1.47405419921875, 1.4739793701171875, 1.475199951171875, 1.4745067138671875, 1.4744473876953126, 1.4743388671875, 1.474714599609375, 1.4745006103515625, 1.4744422607421874, 1.4752808837890625, 1.474572265625, 3.06950341796875, 1.4739578857421876, 1.4735145263671876, 1.4739886474609376, 1.4746470947265624, 1.4735728759765625, 1.4737674560546874, 1.4742681884765625, 1.4737213134765625, 1.47318994140625, 1.47401318359375, 1.47386474609375, 1.47394970703125, 1.4739599609375, 1.4740623779296875, 1.474145263671875, 1.4742425537109376, 1.4741483154296875, 1.4742333984375, 1.4738831787109374, 1.474155517578125, 1.4743111572265626, 1.474144287109375, 1.4746112060546874, 1.4750372314453124, 1.4743541259765625, 1.4747115478515624, 1.4738729248046876, 1.474272216796875, 1.47419140625, 1.4745753173828124, 1.4750064697265626, 1.47466650390625, 1.4748436279296875, 1.474193359375, 1.4741329345703125, 1.4744525146484375, 1.4744913330078124, 1.4748323974609374, 1.4744207763671875, 1.4753935546875, 1.4751805419921875, 1.474482177734375, 1.4740582275390626, 1.4748548583984376, 1.474802734375, 1.4744371337890625, 1.4744453125, 1.4744443359375, 1.4745057373046875, 1.4743941650390624, 1.4749481201171875, 1.47416162109375, 1.47437255859375, 1.4748753662109375, 1.474218994140625, 1.47468798828125, 1.4740306396484375, 1.4754232177734374, 1.474871337890625, 1.4750535888671874, 1.474193359375, 1.47433984375, 3.0700166015625, 1.474471923828125, 1.4742957763671876, 1.474418701171875, 1.4738094482421875, 1.473839111328125, 1.4741832275390625, 1.47508935546875, 1.474249755859375, 1.474166748046875, 1.4742117919921875, 1.47376953125, 1.474102294921875, 1.4737879638671876, 1.4748671875, 1.4747493896484376, 1.474883544921875, 1.4746419677734375, 1.4745159912109376, 1.4746552734375, 1.474293701171875, 1.4748323974609374, 1.4744105224609374, 1.474787353515625, 1.47443408203125, 1.4740316162109375, 1.4738883056640626, 1.47376953125, 1.47512939453125, 1.47419140625, 1.4745528564453125, 1.4741678466796875, 1.47439208984375, 1.4739793701171875, 1.4745281982421874, 1.47403466796875, 1.4748272705078125, 1.474185302734375, 1.4739229736328125, 1.4739056396484376, 1.473829833984375, 1.474093017578125, 1.4740869140625, 1.474112548828125, 1.4749757080078125, 1.4745753173828124, 1.4741094970703126, 1.4738267822265625, 1.4748529052734376, 1.4741513671875, 1.473798095703125, 1.47447607421875, 1.474387939453125, 1.4741156005859375, 1.4746163330078126, 1.474620361328125, 1.4744791259765626, 1.4744678955078125, 1.4747166748046876, 
1.4750802001953125, 1.4745999755859376, 1.4739814453125, 1.4747626953125, 3.069513671875, 1.47412890625, 1.4743746337890624, 1.4740838623046875, 1.4746378173828125, 1.4743603515625, 1.4743265380859376, 1.4740521240234374, 1.474298828125, 1.474333740234375, 1.47361279296875, 1.4742005615234375, 1.4735267333984374, 1.473796142578125, 1.4742364501953125, 1.47390869140625, 1.473765380859375, 1.4739835205078125, 1.4745712890625, 1.4740101318359375, 1.4739835205078125, 1.4743961181640626, 1.474408447265625, 1.4742752685546876, 1.4744114990234376, 1.4744586181640624, 1.4740623779296875, 1.4737520751953126, 1.47413916015625, 1.4744801025390626, 1.4740203857421874, 1.474050048828125, 1.47420361328125, 1.4737581787109375, 1.4745753173828124, 1.474017333984375, 1.474734130859375, 1.47439208984375, 1.4745015869140625, 1.4748160400390624, 1.474017333984375, 1.473838134765625, 1.4741207275390624, 1.473882080078125, 1.4738841552734374, 1.47435107421875, 1.4739732666015626, 1.4739046630859376, 1.4738043212890626, 1.4740582275390626, 1.4742333984375, 1.4741370849609374, 1.4745313720703126, 1.47392822265625, 1.4746859130859375, 1.4744012451171875, 1.4736475830078124, 1.4745435791015624, 1.4739937744140625, 1.4749501953125, 1.4744842529296875, 1.474662353515625, 1.474905029296875, 3.070246826171875, 1.4750576171875, 1.4738739013671875, 1.4734581298828124, 1.4737919921875, 1.474293701171875, 1.4743428955078124, 1.47382275390625, 1.47458154296875, 1.47357080078125, 1.474060302734375, 1.4740726318359374, 1.4753177490234375, 1.473629150390625, 1.4743223876953124, 1.4743489990234375, 1.473955810546875, 1.4739876708984374, 1.4747186279296876, 1.4738134765625, 1.4743214111328125, 1.4742047119140624, 1.4742978515625, 1.4737418212890625, 1.4734571533203125, 1.473523681640625, 1.473606689453125, 1.473850341796875, 1.4743746337890624, 1.4742579345703124, 1.4741832275390625, 1.474040771484375, 1.4740684814453124, 1.4739732666015626, 1.4740592041015625, 1.474809814453125, 1.4747279052734374, 1.4743153076171875, 1.4744268798828124, 1.474154541015625, 1.4741319580078125, 1.473996826171875, 1.4746644287109374, 1.4744012451171875, 1.4747647705078124, 1.474788330078125, 1.4744166259765625, 1.474029541015625, 1.4741083984375, 1.4744698486328125, 1.47449853515625, 1.473933349609375, 1.475099609375, 1.4742476806640625, 1.474344970703125, 1.47441455078125, 1.47498388671875, 1.4740675048828125, 1.4742537841796874, 1.4744873046875, 1.474292724609375, 1.47422509765625, 1.4751826171875, 3.072003173828125, 1.47430908203125, 1.4742056884765624, 1.474298828125, 1.4740029296875, 1.47441357421875, 1.4743634033203126, 1.473666015625, 1.4739056396484376, 1.4738585205078125, 1.4737633056640624, 1.47390771484375, 1.474503662109375, 1.4742467041015626, 1.47406640625, 1.4739844970703124, 1.474124755859375, 1.4739844970703124, 1.47361279296875, 1.474107421875, 1.473881103515625, 1.47378076171875, 1.474008056640625, 1.4745169677734375, 1.474460693359375, 1.474466796875, 1.4740469970703125, 1.4737305908203124, 1.47407666015625, 1.4743223876953124, 1.4743223876953124, 1.4740029296875, 1.4749521484375, 1.474461669921875, 1.4740694580078124, 1.4741329345703125, 1.47427734375, 1.474609130859375, 1.4740633544921875, 1.474460693359375, 1.474523193359375, 1.4746634521484374, 1.4743521728515625, 1.4738759765625, 1.4742733154296874, 1.4747269287109375, 1.474681884765625, 1.474302001953125, 1.4744627685546876, 1.4744678955078125, 1.4744381103515625, 1.474345947265625, 1.4741944580078126, 1.4742548828125, 1.4745078125, 1.4744801025390626, 1.47441455078125, 
1.47418115234375, 1.4743223876953124, 1.4743634033203126, 1.4744627685546876, 1.4740469970703125, 1.4741370849609374, 3.0712216796875, 1.4734315185546876, 1.4745855712890625, 1.47361279296875, 1.473840087890625, 1.4741380615234374, 1.4739364013671874, 1.4736414794921875, 1.473987548828125, 1.4741053466796874, 1.4739149169921875, 1.47364453125, 1.4739620361328125, 1.47382373046875, 1.473976318359375, 1.474334716796875, 1.4739261474609375, 1.473628173828125, 1.4737264404296875, 1.4739302978515625, 1.4734212646484375, 1.4736036376953126, 1.4741319580078125, 1.47426513671875, 1.474302978515625, 1.4739517822265624, 1.4746705322265625, 1.47380126953125, 1.4740101318359375, 1.4745528564453125, 1.4744248046875, 1.47426513671875, 1.4744791259765626, 1.4744033203125, 1.4740244140625, 1.474103271484375, 1.4746705322265625, 1.4742518310546875, 1.474093017578125, 1.474193359375, 1.4737838134765624, 1.473976318359375, 1.474071533203125, 1.47503515625, 1.4746429443359375, 1.4743326416015625, 1.4748948974609375, 1.473871826171875, 1.4741412353515626, 1.474756591796875, 1.474630615234375, 1.474577392578125, 1.474472900390625, 1.474883544921875, 1.4748580322265625, 1.47447705078125, 1.4747811279296874, 1.47460400390625, 1.4745589599609374, 1.4746265869140625, 1.4749808349609375, 1.4749000244140624, 1.474957275390625, 3.07293701171875, 1.4741534423828124, 1.473850341796875, 1.474186279296875, 1.4741903076171874, 1.47452001953125, 1.4740736083984376, 1.474038818359375, 1.474935791015625, 1.4743348388671875, 1.473924072265625, 1.4737950439453125, 1.474008056640625, 1.4743111572265626, 1.474093017578125, 1.4743223876953124, 1.4741851806640625, 1.4741422119140626, 1.4739793701171875, 1.474102294921875, 1.4738380126953126, 1.4738585205078125, 1.4744945068359374, 1.4743634033203126, 1.474292724609375, 1.4741781005859376, 1.4750125732421875, 1.4740316162109375, 1.4740899658203126, 1.473870849609375, 1.47460302734375, 1.4747423095703125, 1.4751129150390625, 1.4748599853515625, 1.474618408203125, 1.475168212890625, 1.4755379638671875, 1.4745087890625, 1.474472900390625, 1.474514892578125, 1.474840576171875, 1.47443603515625, 1.4743101806640626, 1.4746552734375, 1.4748037109375, 1.4745220947265625, 1.4747740478515625, 1.4746317138671876, 1.4742313232421875, 1.4737684326171876, 1.4743951416015626, 1.474081787109375, 1.4740859375, 1.47426611328125, 1.474807861328125, 1.47405615234375, 1.4741063232421876, 1.474302978515625, 1.4747801513671874, 1.4749112548828125, 1.4753310546875, 1.4741790771484375, 1.4742220458984374]",tokens/s,0.6679414821062154,,,,, 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,.,.,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File 
""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 370, in cached_file raise EnvironmentError( OSError: . does not appear to have a file named config.json. Checkout 'https://huggingface.co/./tree/None' for available files. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-j-6b,EleutherAI/gpt-j-6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File 
""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTJForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-7.5B,facebook/xglm-7.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File 
""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: XGLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-7b,tiiuae/falcon-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 612, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 625, in resolve_trust_remote_code raise ValueError( ValueError: The repository for tiiuae/falcon-7b contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/tiiuae/falcon-7b. 
Please pass the argument `trust_remote_code=True` to allow custom code to be run. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/polyglot-ko-12.8b,EleutherAI/polyglot-ko-12.8b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciCoder-1b,Deci/DeciCoder-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 612, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 625, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Deci/DeciCoder-1b contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Deci/DeciCoder-1b. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,m,m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/m/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a2b9d-574d465929854d212abc8336;fc68051f-daa3-4a93-a3f9-8192e8c7598f) Repository Not Found for url: https://huggingface.co/m/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: m is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-1.8B,Qwen/Qwen1.5-1.8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", 
line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-MoE-A2.7B,Qwen/Qwen1.5-MoE-A2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File 
""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,v,v,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/v/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return 
_hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a33c0-7a9b7f5622f08f2f208e11d7;30df4c13-7f2d-4e2a-a621-9f7efabf4c83) Repository Not Found for url: https://huggingface.co/v/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: v is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-110B,Qwen/Qwen1.5-110B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-2-12b,stabilityai/stablelm-2-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.3b,EleutherAI/pythia-1.3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-7b-hf,meta-llama/Llama-2-7b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciLM-7B,Deci/DeciLM-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 613, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 419, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 626, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Deci/DeciLM-7B contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Deci/DeciLM-7B. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-0.5B,Qwen/Qwen1.5-0.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,0,0,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/0/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a3418-14242bc375c7c84d2d2560d0;fd9f41ba-7b1b-4e87-9ac3-9c69f68a62ed) Repository Not Found for url: https://huggingface.co/0/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 0 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-70b-hf,meta-llama/Llama-2-70b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,2,2,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/2/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a32bc-181602bb5b6804f4605e03af;b0382be4-9e07-4500-ae37-b75095164dcb) Repository Not Found for url: https://huggingface.co/2/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 2 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-2.7b,facebook/opt-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"",
line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,l,l,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/l/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a310c-13cbf8860520765d59944807;22f48d97-a4f7-4bd8-abfe-11e6eeabd118) Repository Not Found for url: https://huggingface.co/l/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: l is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Meta-Llama-3-70B,meta-llama/Meta-Llama-3-70B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights
self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-32B,Qwen/Qwen1.5-32B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File
""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-72B,Qwen/Qwen2-beta-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 
488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,s,s,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/s/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2c42-57b4abac60c4dc8a6feb8690;3702add4-1001-4098-9e34-0ad2133d58ce) Repository Not Found for url: https://huggingface.co/s/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: s is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,B,B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/B/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a3312-7724f04b06a3d17c3c48f842;3b7ad6cd-6786-497e-a2ac-0da01ba342a1) Repository Not Found for url: https://huggingface.co/B/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: B is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-14B,Qwen/Qwen2-beta-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,mistralai/Mistral-7B-v0.1,mistralai/Mistral-7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights 
self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-16B-nl,Salesforce/codegen-16B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File
""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: CodeGenForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-2.7b,EleutherAI/pythia-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not 
support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,a,a,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/a/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a30b5-23525fd56c9f8ada53d74549;6e2b02bd-6145-43c1-b8a7-6a5e94a46e6e) Repository Not Found for url: https://huggingface.co/a/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: a is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-2.7B,EleutherAI/gpt-neo-2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = 
Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-40b,tiiuae/falcon-40b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 612, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 625, in resolve_trust_remote_code raise ValueError( ValueError: The repository for tiiuae/falcon-40b contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/tiiuae/falcon-40b. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,1,1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/1/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a34c4-58e6292404575c923a6f8220;51e59f82-8f9d-4d1a-8a87-5c0eecba9a96) Repository Not Found for url: https://huggingface.co/1/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 1 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-base-alpha-3b,stabilityai/stablelm-base-alpha-3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,t,t,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/t/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2ff0-75ac448a266c8ae066e54f1b;d7b1e2f1-57f5-409e-ac8e-e17223be95cb) Repository Not Found for url: https://huggingface.co/t/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: t is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neox-20b,EleutherAI/gpt-neox-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights 
self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,/,/,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 370, in cached_file raise EnvironmentError( OSError: / does not appear to have a file named config.json. Checkout 'https://huggingface.co///tree/None' for available files. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-2b,google/recurrentgemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc3b4-3ac10f5a795747d90001d381;7aed4a55-22a1-4e08-b043-ede56dfd44a4) 403 Forbidden: Please enable access to public gated 
repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/recurrentgemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-72B,Qwen/Qwen1.5-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,-,-,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 106, in _inner_fn validate_repo_id(arg_value) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 160, in validate_repo_id raise HFValidationError( huggingface_hub.errors.HFValidationError: Repo id must use alphanumeric chars or '-', '_', '.', '--' and '..' are forbidden, '-' and '.' cannot start or end the name, max length is 96: '-'. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 463, in cached_file raise EnvironmentError( OSError: Incorrect path_or_model_id: '-'. Please provide either the path to a local folder or the repo_id of a model on the Hub. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-6B-nl,Salesforce/codegen-6B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: CodeGenForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-7b,google/recurrentgemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 
352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667cc40a-4daddb543ed0c334631eef10;b92856c5-d238-4f91-96e5-d57104c4e8f8) Repository Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: google/recurrentgemma-7b is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-13b,huggyllama/llama-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: 
Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-2b,google/gemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: 
https://huggingface.co/google/gemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc30b-5c70826c027bf75a4f4f5a87;763d81c7-f279-4300-9a19-752e0983f5a7) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/gemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-7B,Qwen/Qwen-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 612, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 625, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Qwen/Qwen-7B contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Qwen/Qwen-7B. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-13b-hf,meta-llama/Llama-2-13b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-4B,Qwen/Qwen1.5-4B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-410m,EleutherAI/pythia-410m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-7b,huggyllama/llama-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-125m,EleutherAI/gpt-neo-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,microsoft/phi-1_5,microsoft/phi-1_5,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-14B,Qwen/Qwen-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 612, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 625, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Qwen/Qwen-14B contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Qwen/Qwen-14B. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-65b,huggyllama/llama-65b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 86.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-7b,google/gemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc360-382374311451236047d2363d;1968e638-7d06-44d7-84c9-d6aead917ae2) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/gemma-7b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-7b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,i,i,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/i/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a2f43-4b50d2616245d1df4f8b3a75;b2944d7f-8ffd-4419-8fcf-1c1194cfb150) Repository Not Found for url: https://huggingface.co/i/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: i is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-6.7b,facebook/opt-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", 
line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-34B,01-ai/Yi-34B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 564, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3907, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 466, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-160m,EleutherAI/pythia-160m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File 
""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,M,M,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/M/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2eed-6a378f7f009afe55400fb6af;91d5d3b1-b721-4ca6-8f2b-78e4d70a10c2) Repository Not Found for url: https://huggingface.co/M/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: M is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,openai-community/gpt2-large,openai-community/gpt2-large,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 
7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPT2LMHeadModel does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-70m,EleutherAI/pythia-70m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm-20b,internlm/internlm-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 612, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 625, in resolve_trust_remote_code raise ValueError( ValueError: The repository for internlm/internlm-20b contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/internlm/internlm-20b. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Meta-Llama-3-8B,meta-llama/Meta-Llama-3-8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-350m,facebook/opt-350m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-rw-1b,tiiuae/falcon-rw-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 612, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 625, in resolve_trust_remote_code raise ValueError( ValueError: The repository for tiiuae/falcon-rw-1b contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/tiiuae/falcon-rw-1b. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-125m,facebook/opt-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-4.5B,facebook/xglm-4.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: XGLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,x,x,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/x/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, 
response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3212-586792db15c7ce6c115e3f45;dd0198d2-698d-427e-9283-83fd7b9ffe22) Repository Not Found for url: https://huggingface.co/x/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: x is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-3b-4e1t,stabilityai/stablelm-3b-4e1t,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File 
""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-2-1_6b,stabilityai/stablelm-2-1_6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File 
""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-66b,facebook/opt-66b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-180B,tiiuae/falcon-180B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/tiiuae/falcon-180B/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-6677949d-6f89a4b44b4a01eb781d3760;184f8c6b-6a08-4d58-80ec-93a55a4c0e9c) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/tiiuae/falcon-180B/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like tiiuae/falcon-180B is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-1.3B,EleutherAI/gpt-neo-1.3B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-6.7b,EleutherAI/pythia-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-7B,Qwen/Qwen1.5-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.4b,EleutherAI/pythia-1.4b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-13b,facebook/opt-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,8,8,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/8/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, 
response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a31bb-35d90ec91d88d1fb64650b6b;86a31e46-eed2-41b9-b433-a7f454580e0c) Repository Not Found for url: https://huggingface.co/8/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 8 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-72B,Qwen/Qwen-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 
612, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 625, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Qwen/Qwen-72B contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Qwen/Qwen-72B. Please pass the argument `trust_remote_code=True` to allow custom code to be run. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,databricks/dbrx-base,databricks/dbrx-base,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 
114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cbe71-5eae2fc5709a6682487f8ef8;68ffd37e-b341-4a36-b7f1-a8b98c1da705) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like databricks/dbrx-base is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,microsoft/rho-math-1b-v0.1,microsoft/rho-math-1b-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,openai-community/gpt2,openai-community/gpt2,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPT2LMHeadModel does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,togethercomputer/RedPajama-INCITE-Base-7B-v0.1,togethercomputer/RedPajama-INCITE-Base-7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-30b,huggyllama/llama-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,togethercomputer/RedPajama-INCITE-Base-3B-v1,togethercomputer/RedPajama-INCITE-Base-3B-v1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,mistralai/Mixtral-8x7B-v0.1,mistralai/Mixtral-8x7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,r,r,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/r/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a3050-1d20c9e55e60358332726653;98fb45d9-d47a-4071-9860-d1902d8f4607) Repository Not Found for url: https://huggingface.co/r/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: r is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-12b,EleutherAI/pythia-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-30b,facebook/opt-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained 
self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-564M,facebook/xglm-564M,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation 
config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: XGLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,TencentARC/Mistral_Pro_8B_v0.1,TencentARC/Mistral_Pro_8B_v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File 
""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-base-alpha-7b,stabilityai/stablelm-base-alpha-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-6B,01-ai/Yi-6B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 564, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3907, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 466, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-14B,Qwen/Qwen1.5-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm2-20b,internlm/internlm2-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 612, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 625, in resolve_trust_remote_code raise ValueError( ValueError: The repository for internlm/internlm2-20b contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/internlm/internlm2-20b. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,.,.,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 370, in cached_file raise EnvironmentError( OSError: . does not appear to have a file named config.json. Checkout 'https://huggingface.co/./tree/None' for available files. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-j-6b,,cuda,0,42,,,,,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,main,False,False,MB,2054.77888,5566.365696,0.0,4919.918592,4635.537408,s,10,5.159750671386719,0.5159750671386718,0.0024202673095528563,0.5153813171386719,0.5170659973144531,0.5199656463623047,0.5222853656005859,"[0.5228652954101562, 0.5143955078125, 0.5157650146484375, 0.5144519653320313, 0.5155265502929688, 0.5141107177734375, 0.516309814453125, 0.5146680908203125, 0.516421630859375, 0.515236083984375]",tokens/s,496.14800463061573,kWh,6.087385805116759e-06,3.3354087369571064e-06,2.7887036198498415e-05,3.7309830740572285e-05,tokens/kWh,6861462.379179726,MB,2054.77888,5566.365696,0.0,4919.918592,4794.439168,s,10,301.012869140625,30.101286914062502,0.007474756939868433,30.101162109375,30.1092380859375,30.11128505859375,30.11292263671875,"[30.104474609375, 30.0874609375, 30.10366796875, 30.09865625, 30.095248046875, 30.1085703125, 30.108783203125, 30.11333203125, 30.097923828125, 30.094751953125]",tokens/s,2.0929337732257594,kWh,0.00035535723721815483,0.00019476662748246779,0.0016039330748120972,0.00215405693951272,tokens/kWh,29247.13773548231,,s,629,305.1713383178704,0.4851690593288888,0.06123176770881389,0.4777687072753906,0.47843901367187497,0.47872921142578123,0.9921051538085938,"[0.47712460327148437, 0.4769034118652344, 0.47702426147460936, 0.47709390258789064, 0.47730584716796876, 0.4771205139160156, 0.4773304443359375, 0.47765298461914063, 0.4770846862792969, 0.4785274963378906, 0.47798681640625, 0.47815167236328127, 0.4778588256835937, 0.4773570556640625, 0.477907958984375, 0.47804620361328126, 0.47764480590820313, 0.4777400207519531, 0.4774901733398437, 0.4775546875, 0.4790906982421875, 0.47841998291015625, 0.4775454711914062, 0.4774205322265625, 0.4777062377929687, 0.4773007507324219, 0.47782296752929687, 0.477517822265625, 0.47927194213867186, 0.47922592163085936, 0.4796824951171875, 0.4796200866699219, 0.478603271484375, 0.47771649169921876, 0.4775034790039063, 0.47786904907226563, 0.4779376525878906, 0.4778526611328125, 0.47831039428710936, 0.47768167114257815, 0.4777820129394531, 0.47838104248046875, 0.4779438171386719, 0.4779376525878906, 0.47762432861328125, 0.47779022216796874, 0.47766015625, 0.4779346008300781, 0.47730584716796876, 0.47776666259765627, 0.4777215881347656, 0.4779438171386719, 0.4776980590820312, 0.4786534423828125, 0.47755673217773437, 0.4775034790039063, 0.47755059814453127, 0.47741543579101564, 0.4771686401367187, 0.47783526611328125, 0.4779069519042969, 0.4776560668945313, 0.9921392822265624, 0.4773294067382812, 0.4772085876464844, 0.477370361328125, 0.4770396728515625, 0.4769504699707031, 0.47741748046875, 0.47746356201171875, 0.4771512451171875, 0.47819979858398437, 0.4786329650878906, 0.47734783935546876, 0.477412353515625, 0.4770017395019531, 0.47757003784179686, 0.47735910034179685, 
0.47719219970703125, 0.4772116394042969, 0.4775659484863281, 0.47811892700195313, 0.4780257263183594, 0.4779304809570312, 0.4777492370605469, 0.4775045166015625, 0.4776427612304687, 0.4780738525390625, 0.4773498840332031, 0.47781170654296873, 0.4772178039550781, 0.47686654663085937, 0.4779991149902344, 0.47811175537109374, 0.47742669677734373, 0.4774471740722656, 0.47721368408203124, 0.4768757629394531, 0.47745944213867186, 0.4774481811523438, 0.4774799499511719, 0.4782315979003906, 0.4776785583496094, 0.4772024230957031, 0.477707275390625, 0.47777587890625, 0.47741030883789065, 0.47758950805664063, 0.47756494140625, 0.47724337768554687, 0.4777215881347656, 0.47767962646484374, 0.47741543579101564, 0.47842715454101564, 0.4785602111816406, 0.4774819946289062, 0.4774615173339844, 0.4775465087890625, 0.4775475158691406, 0.4778106994628906, 0.4775577697753906, 0.4773294067382812, 0.47762738037109376, 0.47781375122070313, 0.4781895751953125, 0.9939957885742188, 0.47789157104492186, 0.4779029235839844, 0.4775321044921875, 0.4773294067382812, 0.47682968139648435, 0.4777994384765625, 0.478455810546875, 0.4786780090332031, 0.47789056396484375, 0.47756390380859376, 0.4770979919433594, 0.47737344360351563, 0.4772915344238281, 0.4772536315917969, 0.4778649597167969, 0.47748095703125, 0.4771614685058594, 0.47814862060546875, 0.47750143432617187, 0.4773918762207031, 0.47743896484375, 0.4776662902832031, 0.47875582885742185, 0.4773284606933594, 0.4779539794921875, 0.4778567810058594, 0.47754238891601564, 0.47780154418945314, 0.47773382568359374, 0.4781598815917969, 0.4780472412109375, 0.4777594909667969, 0.477765625, 0.47762841796875, 0.47739596557617187, 0.47771444702148436, 0.47763250732421875, 0.4777687072753906, 0.4783943786621094, 0.47779736328125, 0.4777215881347656, 0.47773284912109376, 0.47762841796875, 0.4779315185546875, 0.4779735107421875, 0.4777461853027344, 0.4787445983886719, 0.47787722778320313, 0.4785080261230469, 0.4784373779296875, 0.47873638916015626, 0.47828274536132814, 0.4777001037597656, 0.47793560791015627, 0.4780277709960937, 0.47800115966796874, 0.47787213134765627, 0.47758950805664063, 0.47742156982421874, 0.47888998413085937, 0.478159912109375, 0.47799700927734373, 0.9920173950195312, 0.47721368408203124, 0.4778260498046875, 0.477939697265625, 0.47749530029296877, 0.4772659912109375, 0.4777287292480469, 0.4779949645996094, 0.47825204467773436, 0.4779581298828125, 0.4773775329589844, 0.4774225769042969, 0.47868109130859376, 0.4778076171875, 0.47798785400390625, 0.478139404296875, 0.47792538452148436, 0.4779366455078125, 0.4774615173339844, 0.47746356201171875, 0.477728759765625, 0.47751473999023436, 0.47717376708984377, 0.47695462036132813, 0.4774942626953125, 0.47761920166015626, 0.4774625244140625, 0.4780001220703125, 0.4778526611328125, 0.47788134765625, 0.4775301513671875, 0.47762127685546873, 0.4773498229980469, 0.4779581298828125, 0.4775413818359375, 0.47721881103515623, 0.47748504638671874, 0.4778824462890625, 0.47803488159179686, 0.47796633911132813, 0.47795098876953124, 0.47843328857421874, 0.4778895263671875, 0.47771749877929687, 0.47763766479492187, 0.47745123291015623, 0.47731610107421873, 0.47720550537109374, 0.47810763549804686, 0.47794686889648436, 0.478023681640625, 0.47817214965820315, 0.4775679931640625, 0.4772167663574219, 0.4781783142089844, 0.4780738525390625, 0.4782264404296875, 0.4780769348144531, 0.4779223022460938, 0.4776662902832031, 0.47806668090820315, 0.4780738525390625, 0.47803903198242187, 0.9936957397460937, 0.47736831665039064, 
0.4772413330078125, 0.47760589599609377, 0.47733966064453126, 0.4780892028808594, 0.47794790649414065, 0.4774604797363281, 0.47752294921875, 0.4775792541503906, 0.47779022216796874, 0.4776407165527344, 0.4775096435546875, 0.4774523010253906, 0.4773304443359375, 0.4777021484375, 0.477939697265625, 0.4781055908203125, 0.4771205749511719, 0.477898681640625, 0.47731201171875, 0.47782196044921876, 0.4773775329589844, 0.4771747741699219, 0.4777697143554688, 0.47787213134765627, 0.4778188781738281, 0.4779683837890625, 0.47738983154296877, 0.4772915344238281, 0.4775802917480469, 0.47765914916992186, 0.47731201171875, 0.47755877685546877, 0.4773355407714844, 0.4775382995605469, 0.477918212890625, 0.47747378540039065, 0.4770508728027344, 0.4778793029785156, 0.477528076171875, 0.47719937133789064, 0.4775516052246094, 0.4772341613769531, 0.4773990478515625, 0.47757415771484374, 0.47758746337890623, 0.47739495849609376, 0.47834423828125, 0.4778536682128906, 0.4773058776855469, 0.47772262573242186, 0.4777635498046875, 0.4777185363769531, 0.4785080261230469, 0.4780687255859375, 0.4775168151855469, 0.4807618408203125, 0.47829605102539063, 0.47767245483398435, 0.4783206481933594, 0.4779980773925781, 0.4780625915527344, 0.9916989135742188, 0.47712973022460936, 0.4770652160644531, 0.4772781982421875, 0.47782296752929687, 0.47800833129882814, 0.47793356323242187, 0.47763150024414064, 0.47709390258789064, 0.47777484130859377, 0.4777667236328125, 0.47767550659179686, 0.47757720947265625, 0.4771798400878906, 0.47727410888671873, 0.47779531860351565, 0.4776089477539063, 0.4775792541503906, 0.47766937255859376, 0.47783627319335936, 0.47713690185546875, 0.47779226684570314, 0.4778567810058594, 0.4774615173339844, 0.4775833740234375, 0.47805950927734375, 0.47846502685546877, 0.478561279296875, 0.4782633056640625, 0.4781055908203125, 0.47865753173828124, 0.47780044555664064, 0.47805950927734375, 0.4787906494140625, 0.47858688354492185, 0.47910400390625, 0.47804107666015627, 0.4773498840332031, 0.47763455200195315, 0.4782264404296875, 0.47779736328125, 0.47735809326171874, 0.4779683837890625, 0.4779571228027344, 0.47788134765625, 0.47783627319335936, 0.4788439025878906, 0.47873126220703127, 0.47824075317382814, 0.47784039306640624, 0.47777279663085936, 0.47869952392578125, 0.4775126953125, 0.47748504638671874, 0.4778311767578125, 0.47821005249023435, 0.478708740234375, 0.4786903076171875, 0.4786278381347656, 0.4780533752441406, 0.47773080444335936, 0.47787213134765627, 0.47809127807617186, 0.9952440185546875, 0.4780052490234375, 0.47796734619140624, 0.47861553955078123, 0.47840972900390627, 0.478055419921875, 0.4771358642578125, 0.47734375, 0.4771768188476562, 0.47729766845703125, 0.477949951171875, 0.4772239379882813, 0.4777277526855469, 0.47779531860351565, 0.47714407348632815, 0.47731814575195314, 0.47762841796875, 0.47755877685546877, 0.477528076171875, 0.4772034606933594, 0.47703143310546875, 0.47779531860351565, 0.4779202575683594, 0.4790067138671875, 0.478593017578125, 0.4792412109375, 0.47822540283203124, 0.4779069519042969, 0.4779427795410156, 0.47770932006835937, 0.47803802490234376, 0.4777840576171875, 0.47797247314453123, 0.477939697265625, 0.47781991577148436, 0.4778240051269531, 0.47803289794921877, 0.47818240356445313, 0.47798175048828123, 0.4779959716796875, 0.4778680419921875, 0.4780902404785156, 0.47819674682617186, 0.47809945678710936, 0.4779765625, 0.479388671875, 0.4785356750488281, 0.4774143981933594, 0.4776642456054688, 0.47775238037109374, 0.4779253234863281, 0.4778547058105469, 
0.4781363220214844, 0.47750860595703126, 0.4780830688476562, 0.47821728515625, 0.4779703674316406, 0.47782196044921876, 0.4780851135253906, 0.4781322326660156, 0.47786392211914064, 0.4776509399414062, 0.4776099853515625, 0.9929461669921875, 0.47778509521484375, 0.48122674560546874, 0.47778509521484375, 0.4777943115234375, 0.47731814575195314, 0.4780748901367188, 0.47792538452148436, 0.47784756469726564, 0.4781158447265625, 0.4780155029296875, 0.47748403930664063, 0.4777891845703125, 0.4779100036621094, 0.4776908874511719, 0.47778713989257815, 0.47860736083984373, 0.47720037841796875, 0.4777891845703125, 0.4777216491699219, 0.4775577087402344, 0.47795404052734375, 0.47750143432617187, 0.47806668090820315, 0.4786780090332031, 0.477744140625, 0.47708673095703125, 0.47796429443359373, 0.4774686584472656, 0.4776775817871094, 0.4779796447753906, 0.47754855346679687, 0.4775096435546875, 0.4782417907714844, 0.47757107543945315, 0.4774205322265625, 0.4781158447265625, 0.4779376525878906, 0.47806362915039063, 0.47810763549804686, 0.4782090148925781, 0.478413818359375, 0.47833804321289064, 0.4782417907714844, 0.4790927429199219, 0.4781537170410156, 0.47856845092773437, 0.4775301208496094, 0.4781506652832031, 0.4774615173339844, 0.4780472412109375, 0.4780318603515625, 0.47794891357421876, 0.47838311767578123, 0.47825408935546876, 0.4780349426269531, 0.47770111083984373, 0.4779857788085938, 0.47789266967773436, 0.47788128662109375, 0.4787712097167969, 0.47795306396484377, 0.47804415893554686, 0.9943838500976563, 0.4787189636230469, 0.47804107666015627, 0.47781375122070313, 0.477370361328125, 0.4773304443359375, 0.47816705322265624, 0.47811993408203124, 0.4777738342285156, 0.47763250732421875, 0.4774573974609375, 0.47708673095703125, 0.47772467041015626, 0.47725567626953125, 0.47783627319335936, 0.47736831665039064, 0.4771112976074219, 0.478445556640625, 0.47864935302734374, 0.4780984191894531, 0.47761102294921876, 0.4776365966796875, 0.4789073791503906, 0.4771778564453125, 0.47777484130859377, 0.47784756469726564, 0.4784169006347656, 0.4775045166015625, 0.47752191162109375, 0.47713177490234376, 0.47731610107421873, 0.47756390380859376, 0.4773212280273437, 0.47744512939453126, 0.4777543640136719, 0.477222900390625, 0.477707275390625, 0.47724850463867186, 0.47723724365234377, 0.4778823547363281, 0.477412353515625, 0.4772392883300781, 0.4777994384765625, 0.47849676513671874, 0.4779949951171875, 0.47791104125976563, 0.47725466918945314, 0.47723629760742187, 0.47771026611328127, 0.478139404296875, 0.4780195922851562, 0.478129150390625, 0.47857867431640627, 0.47747174072265625, 0.47793869018554686, 0.47770932006835937, 0.4779571228027344, 0.47765509033203124, 0.4775464172363281, 0.47778713989257815, 0.477918212890625, 0.4777820129394531, 0.47804107666015627, 0.9942221069335937, 0.47767550659179686, 0.4771686401367187, 0.477431884765625, 0.4771603698730469, 0.47666278076171875, 0.4774676513671875, 0.47758746337890623, 0.47736114501953125, 0.4776488952636719, 0.47701708984375, 0.47684402465820314, 0.47745944213867186, 0.477633544921875, 0.4778076171875, 0.4772536315917969, 0.47761715698242185, 0.47721881103515623, 0.4775362548828125, 0.47752294921875, 0.4779653015136719, 0.47805645751953124, 0.47783526611328125, 0.47724032592773435, 0.4773447570800781, 0.4780175476074219, 0.4793231506347656, 0.4781588439941406, 0.47761715698242185, 0.47719937133789064, 0.4781537170410156, 0.47752294921875, 0.4775311279296875, 0.47761920166015626, 0.4774993896484375, 0.4768399353027344, 0.4775946960449219, 0.47729248046875, 
0.47762841796875, 0.4775372924804687, 0.4772720642089844, 0.4776089477539063, 0.4787261352539062, 0.47772262573242186, 0.47747378540039065, 0.47768780517578124, 0.477528076171875, 0.477149169921875, 0.4776355895996094, 0.47729766845703125, 0.4775628662109375, 0.47870361328125, 0.4778188781738281, 0.4776509399414062, 0.4779898986816406, 0.4782561340332031, 0.47833087158203125, 0.47798681640625, 0.4781035461425781, 0.477955078125, 0.47816192626953125, 0.4783472595214844, 0.47877734375]",tokens/s,2.061137207272146,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-7.5B,facebook/xglm-7.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: XGLMForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpeewmpql9/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-7b,tiiuae/falcon-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: FalconForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpjx3omg7y/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/polyglot-ko-12.8b,EleutherAI/polyglot-ko-12.8b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.0,,,,,,MB,1723.056128,9941.024768,0.0,9294.577664,8910.495232,s,10,10.523069458007814,1.0523069458007812,0.0008028232056597039,1.0523904418945311,1.0530852783203124,1.05345859375,1.05375724609375,"[1.0527669677734375, 1.0538319091796875, 1.05145703125, 1.0517464599609374, 1.052252197265625, 1.0511856689453125, 1.0525286865234376, 1.0530023193359375, 1.0528663330078125, 1.051431884765625]",tokens/s,243.27502638043498,kWh,1.2425229681862726e-05,6.805541535795782e-06,6.044174279779546e-05,7.967251401545396e-05,tokens/kWh,3213153.283331113,MB,1723.056128,9941.024768,0.0,9294.577664,9220.876288,s,10,626.3534921875,62.63534921875,0.004239119732727424,62.635099609375004,62.63956796875,62.641526171875,62.643092734375,"[62.6319921875, 62.6329375, 62.6391328125, 62.643484375, 62.63139453125, 62.6282421875, 62.63577734375, 62.634421875, 62.63843359375, 62.63767578125]",tokens/s,1.0058218048721415,kWh,0.0007394197557369868,0.0004052695339850652,0.003591932262432407,0.004736621552154459,tokens/kWh,13300.619292952455,,s,629,634.8295839233394,1.009268018956025,0.1250286498200473,0.9942149047851563,0.9945757568359375,0.99473427734375,2.0461909814453128,"[0.9937213745117187, 0.9941636962890625, 0.9939281616210938, 0.9936691284179687, 0.9940469970703125, 0.9936640014648438, 0.99386572265625, 0.9940029296875, 0.9938134765625, 0.9938718872070312, 0.9940316162109375, 0.9940879516601563, 0.9947422485351562, 0.9942005615234375, 0.994029541015625, 0.993860595703125, 0.9938984985351562, 0.994188232421875, 0.9941381225585938, 0.9940950927734375, 0.9939527587890625, 0.9939947509765625, 0.99481396484375, 0.994408447265625, 0.994361328125, 0.9940614013671875, 0.99426806640625, 0.9940326538085937, 0.9939323120117187, 0.993944580078125, 0.9940070190429687, 0.994107421875, 0.994229248046875, 0.9940459594726563, 0.9943674926757813, 0.9942251586914063, 0.99422412109375, 0.9942691650390625, 0.9943838500976563, 0.994155517578125, 0.994197509765625, 0.9944873046875, 0.9943777465820313, 0.994418701171875, 0.994387939453125, 0.9942783813476562, 0.9941801147460938, 0.9943582763671875, 0.9945180053710938, 0.9940408325195312, 0.9938134765625, 0.9940326538085937, 0.9944760131835938, 0.9945057373046875, 0.9942804565429687, 0.99428662109375, 0.9941749877929688, 0.994018310546875, 0.9942005615234375, 0.994018310546875, 0.9942138671875, 0.99439306640625, 2.048400390625, 0.9937254638671875, 0.9939384155273437, 0.993649658203125, 0.993681396484375, 0.993491943359375, 0.9935380249023438, 0.993723388671875, 0.9939885864257813, 0.993955810546875, 
0.9941115112304687, 0.9941227416992188, 0.9936732177734375, 0.9938882446289062, 0.993818603515625, 0.9939773559570313, 0.9937152099609375, 0.9938954467773438, 0.994029541015625, 0.9940746459960937, 0.9940223999023438, 0.9943040161132812, 0.9939876098632813, 0.9940018920898438, 0.993870849609375, 0.9941780395507812, 0.9940029296875, 0.9939568481445312, 0.9940223999023438, 0.9943408813476563, 0.9943121948242187, 0.9944913940429687, 0.994107421875, 0.9944105224609375, 0.9942425537109375, 0.9945681762695312, 0.9944022827148438, 0.9939404907226562, 0.994323486328125, 0.9944729614257812, 0.9945559692382813, 0.9941841430664062, 0.9939691772460938, 0.9944022827148438, 0.9942015991210937, 0.9941749877929688, 0.996316162109375, 0.994234375, 0.9943941040039063, 0.9945528564453125, 0.9943756713867188, 0.9942384643554687, 0.99406640625, 0.9946685180664062, 0.9946071166992188, 0.9947156372070313, 0.9943460083007812, 0.9942159423828125, 0.9944033203125, 0.994250732421875, 0.994207763671875, 0.9942732543945313, 0.9941258544921875, 2.0457523193359375, 0.9937326049804688, 0.993923095703125, 0.9941954345703125, 0.9940490112304687, 0.9940633544921875, 0.9942056884765625, 0.9942149047851563, 0.9938370361328125, 0.9937274780273437, 0.9940162353515625, 0.9940469970703125, 0.99371826171875, 0.9940367431640625, 0.9938534545898438, 0.9942271728515625, 0.9941575927734375, 0.9941626586914063, 0.9939773559570313, 0.9939947509765625, 0.993728515625, 0.993818603515625, 0.99376025390625, 0.9947381591796876, 0.9940807495117188, 0.9942988891601563, 0.9943746337890625, 0.9940838623046875, 0.9939465942382812, 0.9941636962890625, 0.9940090942382812, 0.99420263671875, 0.9942149047851563, 0.9942138671875, 0.9943306274414062, 0.9944053955078125, 0.9946675415039062, 0.9946388549804688, 0.9943070678710938, 0.9942528076171875, 0.9945692138671876, 0.9944391479492187, 0.9944893188476562, 0.9945906982421875, 0.9944791259765625, 0.99445556640625, 0.9944688720703125, 0.9956966552734375, 0.9946471557617188, 0.9943448486328125, 0.99428662109375, 0.9943275756835938, 0.9942988891601563, 0.9942845458984375, 0.99435107421875, 0.9946736450195313, 0.9949030151367187, 0.99470849609375, 0.9946101684570312, 0.99445556640625, 0.9944708862304688, 0.9945303344726563, 0.9944258422851563, 2.046361572265625, 0.9943121948242187, 0.993902587890625, 0.9938494262695312, 0.9939803466796875, 0.993817626953125, 0.99403369140625, 0.9938892822265625, 0.993890380859375, 0.9941790161132813, 0.9943521118164063, 0.994566162109375, 0.9950842895507812, 0.9945149536132812, 0.9943418579101563, 0.994255859375, 0.9942322998046875, 0.9944514770507813, 0.9944678344726563, 0.9941493530273438, 0.99386572265625, 0.994466796875, 0.994135009765625, 0.9944791259765625, 0.993997802734375, 0.9939465942382812, 0.993850341796875, 0.994050048828125, 0.994281494140625, 0.9941104736328125, 0.9941370849609374, 0.99445556640625, 0.9942169799804688, 0.9945046997070313, 0.9944360961914063, 0.994355224609375, 0.9945057373046875, 0.9943777465820313, 0.9944063720703125, 0.9943849487304688, 0.9944248046875, 0.9944248046875, 0.9944043579101562, 0.9945538330078125, 0.9957181396484375, 0.9942650756835938, 0.9944627075195313, 0.9945119018554688, 0.9939578857421875, 0.9942630615234375, 0.9940695190429687, 0.994323486328125, 0.9945487060546875, 0.9944248046875, 0.9942774047851562, 0.9945712890625, 0.9945681762695312, 0.994572265625, 0.9946071166992188, 0.9948231811523438, 0.994545654296875, 0.9945897216796875, 0.994703369140625, 2.0469268798828124, 0.9937100830078125, 0.99370703125, 
0.9939507446289062, 0.9940214233398438, 0.993982421875, 0.9939998779296875, 0.994187255859375, 0.9942363891601562, 0.994165771484375, 0.9937623291015625, 0.9936098022460937, 0.9937017822265625, 0.99392919921875, 0.9935421142578125, 0.9941094360351562, 0.9936434936523437, 0.9947597045898438, 0.9939476318359375, 0.9940572509765625, 0.993876953125, 0.9940459594726563, 0.9939384155273437, 0.9936783447265625, 0.9934899291992187, 0.9937745971679688, 0.9935821533203125, 0.9939537353515625, 0.993924072265625, 0.9939793701171875, 0.9942067260742188, 0.9939005737304687, 0.9939332885742187, 0.9940510864257812, 0.9938903198242187, 0.9940438842773438, 0.9939251098632812, 0.9943121948242187, 0.9945211181640625, 0.9941473388671875, 0.9944022827148438, 0.9940101318359374, 0.9942968139648437, 0.9944330444335937, 0.994639892578125, 0.9945938110351562, 0.9943070678710938, 0.99437158203125, 0.9943593139648438, 0.9942528076171875, 0.9945589599609375, 0.994482177734375, 0.9939876098632813, 0.9945620727539063, 0.9944330444335937, 0.9943951416015625, 0.9943367919921875, 0.9962434692382812, 0.9943162841796875, 0.994186279296875, 0.9945906372070312, 0.9944248657226562, 0.9944125366210937, 2.04558642578125, 0.993912841796875, 0.9936803588867188, 0.9937315673828125, 0.9937705688476562, 0.9936362915039062, 0.9941083984375, 0.9939169311523437, 0.993712158203125, 0.9938135375976562, 0.9937837524414063, 0.9940541381835938, 0.9938093872070313, 0.9940275268554688, 0.9939824829101562, 0.9938954467773438, 0.9938401489257812, 0.9937479858398437, 0.9936363525390625, 0.9938779907226563, 0.9936895751953125, 0.9938565063476562, 0.9942282104492187, 0.9945733032226562, 0.9943101196289063, 0.994271240234375, 0.9942517700195312, 0.9937531127929687, 0.9937540893554687, 0.9940654296875, 0.9936906127929688, 0.9937561645507812, 0.9941299438476563, 0.994255859375, 0.9943173217773438, 0.99435107421875, 0.9943480224609375, 0.9938595581054688, 0.9955552978515625, 0.9940582275390625, 0.994145263671875, 0.9937991943359376, 0.9941483764648438, 0.994471923828125, 0.9942374267578125, 0.9941565551757813, 0.994735107421875, 0.99420263671875, 0.9940562133789063, 0.994302978515625, 0.9941094360351562, 0.9940899658203125, 0.9943009033203125, 0.9945149536132812, 0.9943971557617187, 0.9942517700195312, 0.9943009033203125, 0.9943582763671875, 0.9941473388671875, 0.9944699096679688, 0.9942067260742188, 0.9939999389648437, 0.994462646484375, 2.0474276123046873, 0.9942875366210937, 0.9945169677734375, 0.9941104736328125, 0.99388623046875, 0.99411865234375, 0.9940193481445313, 0.9938565063476562, 0.9939681396484376, 0.9939568481445312, 0.9942251586914063, 0.9939885864257813, 0.9937736206054687, 0.9940674438476562, 0.9934888916015625, 0.9936240844726563, 0.9936025390625, 0.9939957885742188, 0.9941309204101563, 0.9939783935546875, 0.9941585693359375, 0.9935872192382813, 0.9937633056640625, 0.99496142578125, 0.9941268310546875, 0.9944862670898438, 0.993723388671875, 0.9938739013671875, 0.9938882446289062, 0.9944063720703125, 0.9942282104492187, 0.9941043090820313, 0.9945272216796875, 0.9945446166992188, 0.9939815063476563, 0.9940366821289063, 0.9942149047851563, 0.9942435913085937, 0.9943531494140625, 0.9939906616210937, 0.994450439453125, 0.9943357543945313, 0.9943040161132812, 0.9945149536132812, 0.9941370849609374, 0.9942384643554687, 0.99449853515625, 0.9946624145507813, 0.9942548217773437, 0.9945487060546875, 0.9945855712890626, 0.9943418579101563, 0.9944760131835938, 0.9945589599609375, 0.9944545288085938, 0.9943593139648438, 0.9944330444335937, 
0.9942958374023437, 0.9943746337890625, 0.9943889770507812, 0.9947330322265625, 0.9947310180664063, 0.9944688720703125, 2.047277099609375, 0.9937192993164062, 0.9936015625, 0.993828857421875, 0.9938534545898438, 0.9940787353515625, 0.9938462524414062, 0.9938677978515625, 0.9937264404296875, 0.9940316162109375, 0.9937715454101562, 0.9938134765625, 0.9937797241210937, 0.9938841552734375, 0.9937469482421875, 0.9938042602539062, 0.9948897094726562, 0.99428759765625, 0.99430810546875, 0.9943807983398437, 0.9940869140625, 0.994150390625, 0.9942937622070313, 0.994207763671875, 0.9942947998046875, 0.9942835083007813, 0.9943101196289063, 0.994366455078125, 0.9940070190429687, 0.9944422607421874, 0.9940172729492187, 0.9939844970703124, 0.9939251098632812, 0.9939476318359375, 0.9940101318359374, 0.9939158935546875, 0.9941954345703125, 0.9944647827148437, 0.994376708984375, 0.9946132202148438, 0.9943787231445312, 0.9942466430664062, 0.994313232421875, 0.9944729614257812, 0.9945261840820312, 0.9942916870117188, 0.9944391479492187, 0.9942671508789063, 0.994545654296875, 0.994798583984375, 0.9944873046875, 0.9943193359375, 0.994164794921875, 0.9943551635742187, 0.9943255004882813, 0.99406640625, 0.9942916870117188, 0.994587646484375, 0.9942916870117188, 0.9949767456054688, 0.9941903076171875, 0.9943889770507812, 0.9943162841796875, 2.047072265625, 0.9936138305664063, 0.9937510375976563, 0.9939783935546875, 0.9938709106445313, 0.9944656982421874, 0.993997802734375, 0.99403369140625, 0.993713134765625, 0.9936732177734375, 0.9935554809570313, 0.9939323120117187, 0.9941073608398437, 0.994260986328125, 0.9937418212890625, 0.9940961303710938, 0.9939844970703124, 0.9946102294921875, 0.993966064453125, 0.9940264892578125, 0.9938206787109375, 0.9939199829101563, 0.9943121948242187, 0.9942937622070313, 0.9941575927734375, 0.9946552124023438, 0.9939906616210937, 0.9938114624023437, 0.9936793823242187, 0.9941647338867188, 0.9941442260742187, 0.994471923828125, 0.9945323486328125, 0.99468798828125, 0.9943142700195312, 0.994466796875, 0.9942467041015625, 0.9940725708007813, 0.9942282104492187, 0.994255859375, 0.9944166259765626, 0.9944391479492187, 0.9945436401367187, 0.9948764038085938, 0.99462451171875, 0.9944658203125, 0.994460693359375, 0.994155517578125, 0.9942097778320312, 0.9942763671875, 0.9942702026367187, 0.994260986328125, 0.9944309692382812, 0.9945098266601563, 0.9942968139648437, 0.9946491088867188, 0.9945579223632812, 0.99460302734375, 0.994281494140625, 0.9945108642578125, 0.99460302734375, 0.9943797607421875, 0.9946961669921875, 2.04813623046875, 0.99420263671875, 0.99418115234375, 0.9938411254882813, 0.9938524169921875, 0.9939671020507812, 0.9941248168945312, 0.994044921875, 0.9940910034179687, 0.9938984985351562, 0.9937213745117187, 0.9939066772460937, 0.9943142700195312, 0.9939824829101562, 0.9937520751953125, 0.9941227416992188, 0.9939323120117187, 0.9941923828125, 0.9938759765625, 0.9943316650390625, 0.994208740234375, 0.99416064453125, 0.9944412231445312, 0.99399169921875, 0.9939773559570313, 0.9942149658203125, 0.9955389404296875, 0.9943828735351562, 0.9939456176757813, 0.9944596557617188, 0.9941442260742187, 0.9940377807617188, 0.9945589599609375, 0.994135009765625, 0.9938985595703125, 0.9942609252929687, 0.994212890625, 0.9942732543945313, 0.9943726196289062, 0.994460693359375, 0.9944248657226562, 0.9942844848632812, 0.9949808349609375, 0.9943121948242187, 0.9940910034179687, 0.994150390625, 0.9942640380859376, 0.9941688232421875, 0.9940387573242188, 0.9946736450195313, 0.994753662109375, 
0.99428857421875, 0.994555908203125, 0.9943367919921875, 0.9942702026367187, 0.994302978515625, 0.994234375, 0.9944371337890625, 0.9942466430664062, 0.9943602905273438, 0.9947944946289062, 0.99437158203125, 0.9947586669921875]",tokens/s,0.9908170884423626,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciCoder-1b,Deci/DeciCoder-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) TypeError: DeciCoderAttention.forward() got an unexpected keyword argument 'cache_position' ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,m,m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/m/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in 
_inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2ba3-5006929269d2f7ac4351ab3e;e50a0c21-e48d-4095-8c5e-303c989f0217) Repository Not Found for url: https://huggingface.co/m/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: m is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-1.8B,Qwen/Qwen1.5-1.8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA 
A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1866.50624,3328.704512,0.0,2682.257408,2578.238464,s,10,1.4175592651367188,0.14175592651367186,0.0022402045159939975,0.14135794067382812,0.14292337188720702,0.14542544631958007,0.1474271058654785,"[0.14792752075195312, 0.14161701965332033, 0.1391898193359375, 0.1415839385986328, 0.14236735534667969, 0.14101449584960937, 0.13986236572265626, 0.1410111083984375, 0.14185369873046874, 0.14113194274902344]",tokens/s,1805.9209677932563,kWh,1.6547061226986076e-06,9.066846465884736e-07,6.786722250360737e-06,9.348113019647817e-06,tokens/kWh,27385205.919305906,MB,1866.50624,3328.704512,0.0,2682.257408,2667.0976,s,10,83.8020732421875,8.38020732421875,0.04772557025936409,8.37658544921875,8.433103320312501,8.4430912109375,8.4510815234375,"[8.4288447265625, 8.39970703125, 8.4144833984375, 8.4530791015625, 8.4308837890625, 8.3407841796875, 8.3356474609375, 8.31612109375, 8.32905859375, 8.3534638671875]",tokens/s,7.51771377038971,kWh,9.806880771561905e-05,5.3749043913185225e-05,0.00039216448579924195,0.0005439823374280462,tokens/kWh,115812.5837281126,,s,629,84.93340989685055,0.1350292685164556,0.016826908436194207,0.13322035217285155,0.13436354370117187,0.1346594787597656,0.27325439575195315,"[0.1383321533203125, 0.13691903686523438, 0.13555609130859375, 0.1336924133300781, 0.1340948486328125, 0.13436006164550782, 0.1343426513671875, 0.1342003173828125, 0.13426687622070313, 0.13448089599609375, 0.13400473022460938, 0.13385011291503907, 0.13449215698242187, 0.13411634826660157, 0.13182975769042968, 0.13270323181152344, 0.13469491577148437, 0.13397605895996093, 0.13359616088867188, 0.13410610961914063, 0.13234483337402345, 0.13145497131347655, 0.13187686157226564, 0.1315246124267578, 0.13338418579101563, 0.13427098083496095, 0.13404261779785157, 0.13425152587890626, 0.13394534301757813, 0.13425050354003906, 0.1328660430908203, 0.1342433319091797, 0.13415116882324218, 0.13333401489257812, 0.1342013397216797, 0.13399346923828126, 0.1340968933105469, 0.134508544921875, 0.13404876708984376, 0.1326940155029297, 0.1316812744140625, 0.13177658081054688, 0.13164845275878906, 0.13489971923828126, 0.13331251525878907, 0.1342248992919922, 0.13452799987792968, 0.13407334899902343, 0.13159321594238282, 0.13165260314941407, 0.13322035217285155, 0.13399244689941406, 0.13447474670410156, 0.13405696105957032, 0.13407948303222655, 0.13395046997070312, 0.13439283752441405, 0.1342300109863281, 0.13573222351074218, 0.1321625671386719, 0.13170687866210937, 0.13169970703125, 0.2737838134765625, 0.13152255249023437, 0.13275955200195313, 0.13433351135253907, 0.1345811767578125, 0.13403852844238281, 0.13368319702148437, 0.13302169799804686, 0.13411737060546874, 0.13280256652832031, 0.13474508666992188, 0.13335859680175782, 0.1340590057373047, 0.13169459533691405, 0.13173248291015624, 0.13171405029296876, 0.13407743835449218, 0.1342689208984375, 0.13365248107910158, 0.13403340148925783, 0.1339299774169922, 0.13434367370605468, 0.1315061798095703, 0.13148466491699218, 0.13182156372070314, 0.1337518005371094, 0.1333534698486328, 0.1340999755859375, 0.13400575256347655, 0.13407852172851562, 0.1340497283935547, 0.13159117126464845, 0.13308108520507814, 0.13406515502929686, 0.13437747192382812, 0.1340518341064453, 0.1342433319091797, 0.13421466064453125, 0.13415834045410155, 0.1342986297607422, 0.13406617736816406, 0.13197926330566406, 0.1316177978515625, 0.13170687866210937, 0.13170687866210937, 0.13363917541503906, 0.13486598205566405, 0.13352549743652345, 
0.13383468627929687, 0.13421055603027343, 0.13174989318847657, 0.13169973754882813, 0.13152662658691405, 0.13283226013183594, 0.13399142456054688, 0.13478297424316407, 0.13450035095214843, 0.1341460418701172, 0.1339115447998047, 0.13337496948242186, 0.134002685546875, 0.13234687805175782, 0.13289779663085938, 0.2724710388183594, 0.13155020141601562, 0.13163827514648438, 0.131810302734375, 0.1316618194580078, 0.13126144409179688, 0.13352549743652345, 0.13428326416015626, 0.13351628112792968, 0.13398326110839845, 0.1340497589111328, 0.1338419189453125, 0.13386444091796876, 0.13370777893066407, 0.13431910705566405, 0.1338470458984375, 0.13402931213378907, 0.13414399719238282, 0.13386752319335937, 0.13391973876953125, 0.13366067504882811, 0.13545677185058594, 0.13440921020507812, 0.13534104919433593, 0.13244415283203126, 0.134466552734375, 0.13180108642578126, 0.131599365234375, 0.13178880310058594, 0.13154098510742188, 0.13177754211425782, 0.13156147766113283, 0.13368832397460936, 0.134044677734375, 0.13403347778320313, 0.13381727600097656, 0.13421772766113282, 0.1335265350341797, 0.13435289001464842, 0.133959716796875, 0.13362889099121095, 0.1339842529296875, 0.13384602355957032, 0.13403443908691406, 0.13420748901367188, 0.13402316284179688, 0.1340590057373047, 0.13474713134765626, 0.13409791564941406, 0.13352960205078124, 0.13388697814941405, 0.1340088348388672, 0.13404978942871093, 0.13377023315429687, 0.13412864685058593, 0.13397708129882813, 0.13398016357421874, 0.13395968627929689, 0.1340712890625, 0.13423004150390624, 0.13353570556640626, 0.13389414978027345, 0.13325209045410155, 0.27623629760742185, 0.134403076171875, 0.13430271911621094, 0.13406105041503907, 0.1344286651611328, 0.1346447296142578, 0.13446553039550782, 0.13406207275390625, 0.13451161193847655, 0.13428941345214843, 0.1343856658935547, 0.13401190185546874, 0.134466552734375, 0.13412249755859376, 0.13459762573242187, 0.1339463653564453, 0.13446553039550782, 0.13476864624023438, 0.1342044219970703, 0.13403443908691406, 0.13416653442382812, 0.134276123046875, 0.13387055969238282, 0.13414707946777343, 0.13453106689453126, 0.13373338317871095, 0.13383680725097657, 0.13395046997070312, 0.133791748046875, 0.13404570007324218, 0.1341265869140625, 0.13388697814941405, 0.1346693115234375, 0.1340518341064453, 0.1335562286376953, 0.1343969268798828, 0.13446348571777345, 0.1351065673828125, 0.1340641326904297, 0.13445733642578125, 0.13435903930664062, 0.1340631103515625, 0.1340518341064453, 0.13444816589355468, 0.13536051940917967, 0.13402621459960937, 0.13327052307128906, 0.13432012939453125, 0.1344593963623047, 0.13412249755859376, 0.1338419189453125, 0.13387469482421874, 0.13338726806640624, 0.13416653442382812, 0.13386854553222657, 0.13441127014160156, 0.13412351989746094, 0.13347328186035157, 0.13382246398925782, 0.13420338439941407, 0.1338275909423828, 0.13340467834472655, 0.1342750701904297, 0.27683428955078127, 0.13397196960449217, 0.1343129577636719, 0.1338665008544922, 0.13388082885742186, 0.1340958709716797, 0.1340712890625, 0.13391871643066405, 0.13356646728515625, 0.1333104705810547, 0.13409178161621094, 0.133396484375, 0.1351403503417969, 0.13372825622558593, 0.13417575073242188, 0.13519052124023437, 0.1343938903808594, 0.1338654327392578, 0.133359619140625, 0.1340712890625, 0.13396890258789063, 0.1339894104003906, 0.1338838653564453, 0.13374668884277344, 0.133538818359375, 0.13404876708984376, 0.13366169738769532, 0.1339351043701172, 0.13478195190429687, 0.13399859619140625, 0.13399449157714843, 0.13394432067871093, 
0.13413682556152343, 0.1340262451171875, 0.13399449157714843, 0.13403546142578124, 0.13435289001464842, 0.1340712890625, 0.13406112670898437, 0.1335233917236328, 0.13358387756347656, 0.13382553100585937, 0.13504620361328126, 0.13462828063964843, 0.13439897155761718, 0.13393516540527345, 0.13389511108398439, 0.1334097900390625, 0.13408767700195312, 0.13458636474609376, 0.13346713256835938, 0.13414297485351562, 0.13160243225097656, 0.1317795867919922, 0.13214413452148438, 0.13376716613769532, 0.133718017578125, 0.13606605529785157, 0.13217485046386718, 0.13167718505859374, 0.13171302795410156, 0.13281280517578126, 0.13389926147460937, 0.27561163330078126, 0.1342044219970703, 0.13399655151367187, 0.13381837463378907, 0.13696818542480468, 0.13187277221679689, 0.13215437316894532, 0.13239193725585938, 0.1329521026611328, 0.13212156677246092, 0.13276876831054688, 0.132463623046875, 0.13214002990722656, 0.13234072875976563, 0.13270118713378906, 0.13262847900390626, 0.1321871337890625, 0.1320509490966797, 0.13238067626953126, 0.13276876831054688, 0.1321564178466797, 0.1327431640625, 0.1328158721923828, 0.13263360595703125, 0.1317580871582031, 0.1315215301513672, 0.13196083068847655, 0.1318225860595703, 0.13159117126464845, 0.13150003051757814, 0.13156454467773437, 0.13174887084960937, 0.1325496368408203, 0.1326755828857422, 0.13302169799804686, 0.13294898986816406, 0.1324881896972656, 0.13196800231933595, 0.13392588806152345, 0.1337159729003906, 0.13316915893554687, 0.1326755828857422, 0.13233255004882813, 0.13182054138183594, 0.13182566833496093, 0.13186253356933594, 0.13180519104003907, 0.1316822967529297, 0.13177548217773438, 0.13161984252929687, 0.1317611541748047, 0.131852294921875, 0.13162086486816407, 0.1321553955078125, 0.13262950134277343, 0.13191372680664062, 0.13178265380859375, 0.13150515747070313, 0.13170176696777344, 0.1315205078125, 0.13172735595703125, 0.13178778076171874, 0.13167103576660155, 0.2728120422363281, 0.13346815490722655, 0.1335142364501953, 0.1333534698486328, 0.13239910888671874, 0.13167616271972657, 0.13235302734375, 0.1338050537109375, 0.13347225952148437, 0.1334886474609375, 0.1320120391845703, 0.1320816650390625, 0.13306573486328124, 0.13341183471679688, 0.13176934814453126, 0.13158195495605468, 0.13284147644042968, 0.13212979125976562, 0.1316290588378906, 0.1317058563232422, 0.13171098327636718, 0.13157887268066407, 0.13160960388183593, 0.13298486328125, 0.13236732482910157, 0.13162188720703125, 0.13165260314941407, 0.13161062622070313, 0.13162495422363282, 0.13157376098632811, 0.13175910949707031, 0.13201100158691406, 0.13279539489746095, 0.13196493530273437, 0.13224140930175782, 0.13323365783691407, 0.13173248291015624, 0.13189529418945312, 0.133254150390625, 0.13235711669921876, 0.1316505584716797, 0.13167922973632812, 0.13156556701660158, 0.13257522583007814, 0.1328158721923828, 0.13173350524902344, 0.13212261962890626, 0.13244415283203126, 0.13331968688964843, 0.1319720916748047, 0.13162188720703125, 0.1338275909423828, 0.13188710021972655, 0.13169049072265626, 0.1329100799560547, 0.13289677429199218, 0.13157273864746094, 0.13191987609863282, 0.1330063934326172, 0.13177952575683594, 0.1324830780029297, 0.1317232666015625, 0.132210693359375, 0.2737438659667969, 0.13398220825195312, 0.13256195068359375, 0.13281686401367188, 0.13316409301757812, 0.13165664672851562, 0.131557373046875, 0.1315246124267578, 0.1316116485595703, 0.13375692749023438, 0.13275033569335937, 0.13160755920410155, 0.13167514038085937, 0.1316546630859375, 0.13151744079589844, 
0.1325260772705078, 0.13415525817871093, 0.13211955261230468, 0.13158604431152343, 0.13158706665039063, 0.131599365234375, 0.13191885375976561, 0.13164959716796876, 0.13163615417480468, 0.131557373046875, 0.1315635223388672, 0.13156454467773437, 0.13150822448730468, 0.13155328369140626, 0.131778564453125, 0.13244825744628907, 0.13156761169433595, 0.13152870178222656, 0.13237452697753907, 0.13321420288085936, 0.1316188507080078, 0.13167100524902345, 0.13171507263183593, 0.13173043823242186, 0.13229055786132812, 0.13326028442382812, 0.13208883666992188, 0.13159730529785157, 0.13276570129394533, 0.13298074340820312, 0.13150210571289062, 0.13231517028808593, 0.13168019104003906, 0.1316177978515625, 0.13166490173339843, 0.13228031921386718, 0.131704833984375, 0.13154815673828124, 0.1315246124267578, 0.13159117126464845, 0.13162803649902344, 0.13144985961914063, 0.13153074645996093, 0.13159423828125, 0.13154304504394532, 0.1314959411621094, 0.13150822448730468, 0.13215335083007812, 0.2743265380859375, 0.13167922973632812, 0.13160243225097656, 0.13170278930664062, 0.13161068725585937, 0.13170375061035156, 0.13167820739746094, 0.13153996276855467, 0.13159628295898437, 0.13154917907714844, 0.13170994567871094, 0.13149491882324219, 0.1311682586669922, 0.1336678466796875, 0.13168333435058593, 0.1315020751953125, 0.13163827514648438, 0.1326510009765625, 0.13166490173339843, 0.131599365234375, 0.1316054992675781, 0.1315574035644531, 0.1316433563232422, 0.13159321594238282, 0.13151744079589844, 0.13169664001464843, 0.13158604431152343, 0.13171609497070313, 0.13180723571777345, 0.13306982421875, 0.1319505920410156, 0.13152665710449218, 0.1316188201904297, 0.13160755920410155, 0.13153485107421875, 0.1316321258544922, 0.13152255249023437, 0.13150413513183593, 0.13162393188476562, 0.13158604431152343, 0.13178880310058594, 0.13175398254394532, 0.13282815551757812, 0.13279335021972657, 0.1332623291015625, 0.13260493469238283, 0.1335029754638672, 0.1333217315673828, 0.13349990844726561, 0.13284454345703126, 0.13330943298339842, 0.13328793334960937, 0.13312818908691407, 0.13345382690429688, 0.13288447570800782, 0.13335142517089843, 0.1332725830078125, 0.1326561279296875, 0.13411943054199219, 0.1333104705810547, 0.13202330017089844, 0.1328404541015625, 0.13351219177246093, 0.27342642211914064, 0.1317181396484375, 0.131778564453125, 0.1316300811767578, 0.13164851379394532, 0.13157376098632811, 0.13182464599609375, 0.1316505584716797, 0.1314580535888672, 0.1316853790283203, 0.13220352172851563, 0.13342617797851564, 0.13168435668945314, 0.1316259765625, 0.13177754211425782, 0.13159423828125, 0.13173356628417968, 0.1318133087158203, 0.1316444091796875, 0.1315082550048828, 0.13163209533691406, 0.13170994567871094, 0.13161062622070313, 0.13163929748535155, 0.13251788330078124, 0.13330738830566408, 0.13480653381347657, 0.13196902465820312, 0.1324451904296875, 0.13310054016113282, 0.13354290771484376, 0.13340882873535156, 0.1336657257080078, 0.13366886901855468, 0.133396484375, 0.13281893920898438, 0.13362892150878905, 0.13327769470214842, 0.1322239990234375, 0.13191474914550783, 0.13346099853515625, 0.1332162628173828, 0.13334835815429688, 0.13336679077148436, 0.13285171508789062, 0.13309132385253905, 0.13161369323730468, 0.13310464477539063, 0.13307391357421874, 0.13365248107910158, 0.13310464477539063, 0.13244313049316406, 0.1326376953125, 0.13354908752441405, 0.13300936889648438, 0.13332179260253907, 0.13388691711425782, 0.13313023376464844, 0.1333780517578125, 0.13311077880859376, 0.13335551452636718, 
0.13207347106933592, 0.1331865539550781]",tokens/s,7.405801801245284,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-MoE-A2.7B,Qwen/Qwen1.5-MoE-A2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3640, in from_pretrained hf_quantizer.preprocess_model( File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 182, in preprocess_model return self._process_model_before_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 85, in _process_model_before_weight_loading model, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( [Previous line repeated 1 more time] File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 165, in replace_with_awq_linear model._modules[name] = target_cls( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemm.py"", line 102, in __init__ assert out_features % (32 // self.w_bit) == 0 AssertionError ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,v,v,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/v/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a33c7-0cd754b45e1145164f68f15f;e33e9af3-53d7-4bc0-a589-75976608885b) Repository Not Found for url: https://huggingface.co/v/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: v is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-110B,Qwen/Qwen1.5-110B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-2-12b,stabilityai/stablelm-2-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.1,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2287.439872,9584.508928,0.0,8938.061824,8629.72416,s,10,10.131507629394532,1.013150762939453,0.002692660193456455,1.0122420349121093,1.01416416015625,1.017623095703125,1.020390244140625,"[1.02108203125, 1.01157763671875, 1.012674072265625, 1.0126704711914063, 1.0133955078125, 1.012330322265625, 1.0118361206054687, 1.0117332763671876, 1.012054443359375, 1.0121537475585938]",tokens/s,252.67710331408875,kWh,1.195108746488889e-05,6.548659351992683e-06,5.668768423900405e-05,7.518743105588562e-05,tokens/kWh,3404824.4022291345,MB,2287.439872,9584.508928,0.0,8938.061824,8715.664896,s,10,591.45866796875,59.145866796875,0.006617972035565828,59.14582421874999,59.155115234374996,59.1558447265625,59.1564283203125,"[59.14538671875, 59.13608984375, 59.150515625, 59.1451953125, 59.154953125, 59.15657421875, 59.14678515625, 59.1360078125, 59.1408984375, 59.14626171875]",tokens/s,1.0651631874186793,kWh,0.0006982415291666984,0.00038269747479935173,0.0032679308921203906,0.004348869896086441,tokens/kWh,14486.52213226564,,s,629,599.6304891357424,0.9533076138883024,0.12026100633018022,0.9387591552734375,0.9396539428710937,0.9399756713867188,1.9507973779296877,"[0.9389322509765625, 0.9398538208007813, 0.938841064453125, 0.93900390625, 0.9390182495117188, 0.9385973510742187, 0.9396909790039063, 0.938871826171875, 0.9403566284179687, 0.9389732055664063, 0.939009033203125, 0.9392967529296875, 0.9398015747070313, 0.938829833984375, 0.9383045043945313, 0.9377362060546875, 0.9381539916992188, 0.9390131225585937, 0.9386055908203125, 0.9399725952148438, 0.93948828125, 0.939114501953125, 0.939462646484375, 0.93991015625, 0.9391953735351563, 0.9389260864257812, 0.9383475341796875, 0.9386793212890625, 0.9386414184570312, 0.9392066650390625, 0.9393878784179688, 0.9381703491210938, 0.9381140747070312, 0.9383218994140625, 0.9380587768554688, 0.9380526123046875, 0.9383915405273437, 0.9382041625976563, 0.9384386596679688, 0.9379778442382812, 0.9379686279296875, 0.9389004516601562, 0.938387451171875, 0.93830859375, 0.9408757934570312, 0.9392742309570312, 0.9384775390625, 0.9388594970703125, 0.9387110595703125, 0.938039306640625, 0.9378734130859375, 0.93863525390625, 0.9386055908203125, 0.9390182495117188, 0.938181640625, 0.939652099609375, 0.9389639892578125, 0.9383065795898438, 0.939093017578125, 0.9385164794921875, 0.9385799560546875, 0.9382952880859375, 1.9507384033203126, 0.9382799072265625, 0.9386485595703125, 0.938119140625, 0.9379522705078125, 0.9383259887695312, 0.9382891235351563, 0.938335205078125, 0.9381273803710938, 0.9386373291015625, 0.938450927734375, 0.9386209106445312, 0.9381652221679687, 0.9398865966796875, 0.93909912109375, 0.9384284057617187, 0.9378529052734375, 0.9383649291992188, 0.938302490234375, 0.9382522583007813, 0.9403709716796875, 
0.9398660888671875, 0.93905615234375, 0.9389526977539062, 0.9395927124023438, 0.9383956298828126, 0.9380956420898438, 0.9384847412109375, 0.9385236206054688, 0.9388482666015625, 0.9386148071289062, 0.93890966796875, 0.938439697265625, 0.93823486328125, 0.9391492919921876, 0.9383259887695312, 0.9384806518554687, 0.9384683227539062, 0.9383198852539063, 0.9388380126953125, 0.9383782348632812, 0.939188232421875, 0.9388707885742188, 0.9384468383789063, 0.9386332397460937, 0.9392158813476562, 0.9388359375, 0.9386854248046875, 0.9379727172851563, 0.9386455078125, 0.9383505859375, 0.938419189453125, 0.93814990234375, 0.9399869384765625, 0.9389414672851563, 0.9384857788085937, 0.93823486328125, 0.9385400390625, 0.9385010986328125, 0.9388738403320313, 0.9391431884765625, 0.9389598999023437, 0.9393756103515625, 1.951994873046875, 0.938608642578125, 0.9388524169921875, 0.9380965576171875, 0.9379512329101563, 0.9383270263671875, 0.9390551147460937, 0.9391697998046875, 0.938060791015625, 0.9392967529296875, 0.9386834106445312, 0.9387222900390625, 0.93907763671875, 0.940732421875, 0.9389188842773437, 0.9391503295898438, 0.9383372802734375, 0.938250244140625, 0.9387908935546875, 0.9389424438476562, 0.939419677734375, 0.9392752685546875, 0.9382041625976563, 0.9396817626953125, 0.9394974975585938, 0.93903564453125, 0.938745849609375, 0.9387335815429687, 0.9390694580078125, 0.9386281127929688, 0.9390643310546875, 0.9388779296875, 0.9388635864257813, 0.9396141967773437, 0.9400760498046875, 0.9385093383789063, 0.9377218627929688, 0.9381908569335937, 0.9382952880859375, 0.9383936157226562, 0.9382778930664063, 0.9385912475585938, 0.9392332763671875, 0.9384796142578125, 0.9385226440429687, 0.9392466430664063, 0.9381528930664063, 0.9381519165039063, 0.938461181640625, 0.9395015869140625, 0.93941455078125, 0.9390643310546875, 0.9385420532226563, 0.94017333984375, 0.939114501953125, 0.9396920166015625, 0.9393316040039063, 0.938903564453125, 0.9387530517578125, 0.9390448608398437, 0.9396613159179688, 0.9389629516601562, 0.938998779296875, 1.9516702880859376, 0.9386065673828125, 0.9402235107421875, 0.9383372802734375, 0.9382932739257812, 0.938365966796875, 0.9387254028320312, 0.938693603515625, 0.9385010986328125, 0.938767333984375, 0.9391104125976563, 0.9382758178710937, 0.93905712890625, 0.9390704345703125, 0.93931005859375, 0.9386598510742188, 0.9379256591796875, 0.938017822265625, 0.9386240234375, 0.9385728149414062, 0.9389793090820312, 0.9392128295898438, 0.938334228515625, 0.9388523559570312, 0.938787841796875, 0.9384253540039063, 0.9385072631835938, 0.93846630859375, 0.9390551147460937, 0.939335693359375, 0.9389865112304687, 0.9403515014648437, 0.938693603515625, 0.9387028198242188, 0.9390755615234375, 0.9390796508789062, 0.9388257446289062, 0.9389475708007813, 0.9399132080078125, 0.938587158203125, 0.9390377197265625, 0.9394114379882812, 0.940221435546875, 0.9385441284179687, 0.9391052856445312, 0.938335205078125, 0.9382625122070313, 0.9383117065429688, 0.9383178100585937, 0.9387202758789063, 0.9384796142578125, 0.9386547241210937, 0.9381621704101563, 0.93876123046875, 0.9382215576171875, 0.9389844360351562, 0.9389209594726563, 0.9384591064453125, 0.9386475219726562, 0.9395773315429687, 0.9389486083984375, 0.9387765502929688, 0.93916162109375, 1.9526563720703125, 0.9392178955078125, 0.9389434814453125, 0.93859326171875, 0.939198486328125, 0.93855126953125, 0.9389004516601562, 0.9386444702148438, 0.9396828002929688, 0.9400084228515625, 0.9395486450195313, 0.93916162109375, 0.939404296875, 0.9389486083984375, 
0.939167724609375, 0.9397196655273438, 0.93857177734375, 0.938660888671875, 0.9386322021484375, 0.9401231079101563, 0.939725830078125, 0.9393346557617187, 0.9393930053710937, 0.9400657958984375, 0.9386649780273437, 0.9385441284179687, 0.9381509399414063, 0.9390632934570312, 0.9386444702148438, 0.9382697143554688, 0.939452392578125, 0.9390592041015625, 0.9396920166015625, 0.93861376953125, 0.9394022216796875, 0.939062255859375, 0.9391001586914063, 0.938335205078125, 0.9390151977539063, 0.9384151000976563, 0.9390755615234375, 0.93837109375, 0.9391544189453125, 0.9386065673828125, 0.9394503784179687, 0.9384161376953125, 0.938771484375, 0.9382778930664063, 0.9383301391601563, 0.9384806518554687, 0.9385072631835938, 0.939251708984375, 0.9386219482421875, 0.939736083984375, 0.9381171264648438, 0.939852783203125, 0.9383362426757812, 0.9384755249023438, 0.9382440795898438, 0.9390888671875, 0.9384498901367188, 0.9385430908203125, 0.93985791015625, 1.9517337646484374, 0.9388739013671875, 0.9396459350585937, 0.9385574951171874, 0.9385686645507813, 0.9384365844726562, 0.9387765502929688, 0.93935205078125, 0.9397186279296875, 0.93812939453125, 0.939156494140625, 0.938771484375, 0.9394319458007813, 0.9387591552734375, 0.9385062255859375, 0.9385093383789063, 0.9380311279296875, 0.9388994750976563, 0.9391216430664062, 0.9395814208984375, 0.939610107421875, 0.9395476684570313, 0.9393233642578125, 0.9399511108398437, 0.9398251342773437, 0.9391124267578125, 0.9394176025390625, 0.9400964965820312, 0.9399777221679687, 0.9401446533203125, 0.939125732421875, 0.9388021850585937, 0.9388124389648438, 0.9400350952148437, 0.9387550659179688, 0.9386065673828125, 0.9382359008789063, 0.9382369384765625, 0.93964697265625, 0.9381478271484375, 0.939219970703125, 0.9382389526367187, 0.9393500366210937, 0.9386393432617187, 0.9393213500976563, 0.9382932739257812, 0.9387704467773438, 0.9389281005859375, 0.9396756591796875, 0.9391441650390625, 0.9385687255859375, 0.9398343505859375, 0.93823486328125, 0.9392557983398437, 0.9380843505859375, 0.939009033203125, 0.9379246215820313, 0.938049560546875, 0.93809765625, 0.9396961059570312, 0.938603515625, 0.9394022216796875, 0.9386465454101562, 1.9507159423828124, 0.9391738891601562, 0.9382543334960938, 0.9382963256835938, 0.9384693603515625, 0.9383526611328125, 0.9399357299804687, 0.93943603515625, 0.9397831420898437, 0.93865673828125, 0.9389649658203125, 0.938598388671875, 0.9392752685546875, 0.938119140625, 0.9378037719726563, 0.9382062377929687, 0.938081298828125, 0.9376358642578125, 0.9382072143554687, 0.9395159301757813, 0.9385308227539062, 0.93964697265625, 0.9388390502929688, 0.9387509765625, 0.9381334838867188, 0.938144775390625, 0.9383526000976562, 0.9388922729492187, 0.9388226318359375, 0.9387222900390625, 0.9390540771484375, 0.9390018310546875, 0.9392066650390625, 0.9401517944335938, 0.9398220825195313, 0.938925048828125, 0.938987548828125, 0.9386342163085938, 0.9397770385742188, 0.9390172119140625, 0.9395302124023438, 0.9389547729492187, 0.9400145874023438, 0.9384099731445312, 0.939598876953125, 0.9384038696289062, 0.9381325073242187, 0.9380997314453124, 0.9389240112304688, 0.938376220703125, 0.9384140625, 0.9392455444335938, 0.9389741821289063, 0.9392086791992188, 0.9387509765625, 0.9395404663085938, 0.93880322265625, 0.938650634765625, 0.9383117065429688, 0.9391912841796874, 0.938625, 0.93869775390625, 0.9389117431640625, 1.9512227783203124, 0.9401149291992188, 0.938693603515625, 0.9384498901367188, 0.9391452026367187, 0.9389598999023437, 0.93937255859375, 
0.9388656616210938, 0.939441162109375, 0.938603515625, 0.9388677368164062, 0.9383546752929688, 0.9392056274414062, 0.938287109375, 0.9377484741210937, 0.9378836669921875, 0.93793896484375, 0.9375641479492187, 0.9385707397460937, 0.9388339233398437, 0.938630126953125, 0.9394104614257812, 0.9392772827148438, 0.9391390991210937, 0.938872802734375, 0.9386659545898437, 0.9385103149414062, 0.9388339233398437, 0.9386782836914063, 0.9387857666015625, 0.9381437377929688, 0.9386362915039063, 0.9390151977539063, 0.939298828125, 0.9390264282226563, 0.9383864135742187, 0.9382738037109375, 0.9385748291015625, 0.938534912109375, 0.938250244140625, 0.9396162719726563, 0.9382584228515625, 0.9384591064453125, 0.9386362915039063, 0.9391001586914063, 0.9379573974609375, 0.9382379760742188, 0.938408935546875, 0.9383331909179687, 0.9385953369140625, 0.9381273803710938, 0.9385042114257812, 0.9381089477539063, 0.9387110595703125, 0.9393162231445312, 0.9386168823242188, 0.9382542724609375, 0.9378928833007812, 0.9382205200195313, 0.9389230346679688, 0.9386598510742188, 0.939430908203125, 0.9383444213867187, 1.9508203125, 0.9386854248046875, 0.9382471923828125, 0.938323974609375, 0.9382011108398437, 0.9390366821289062, 0.93852978515625, 0.9385359497070312, 0.9398814697265625, 0.9387048950195312, 0.9394933471679687, 0.939357177734375, 0.9405009765625, 0.9390079956054688, 0.9383884887695313, 0.9386322021484375, 0.9385226440429687, 0.9374658813476563, 0.9394872436523437, 0.9389967651367187, 0.9386393432617187, 0.939273193359375, 0.939357177734375, 0.9387940063476562, 0.9389414672851563, 0.9389557495117188, 0.9387868041992188, 0.9400811767578126, 0.9384970092773437, 0.9387786254882813, 0.938735595703125, 0.938808349609375, 0.939025390625, 0.9387807006835938, 0.9382564086914063, 0.9380014038085938, 0.9381089477539063, 0.9382532958984375, 0.938630126953125, 0.938618896484375, 0.938883056640625, 0.9382164306640625, 0.938872802734375, 0.9385338745117188, 0.9390315551757813, 0.9384253540039063, 0.938555419921875, 0.9387694091796875, 0.9386690673828125, 0.9383936157226562, 0.9389619140625, 0.9384304809570313, 0.9386782836914063, 0.9390888671875, 0.939125732421875, 0.9383280639648437, 0.938625, 0.9386373291015625, 0.9384110107421875, 0.9388840942382812, 0.9385308227539062, 0.9386843872070313, 0.9381119995117188, 1.9515452880859374, 0.9394534301757812, 0.93916259765625, 0.9390397338867188, 0.9386148071289062, 0.9386332397460937, 0.9383301391601563, 0.9385236206054688, 0.9390694580078125, 0.9383864135742187, 0.9387816772460937, 0.938566650390625, 0.9386209106445312, 0.938498046875, 0.938482666015625, 0.9383372802734375, 0.938598388671875, 0.9374115600585937, 0.9391605834960938, 0.9391114501953125, 0.9388984375, 0.9407098999023438, 0.9396029663085937, 0.9391421508789063, 0.938498046875, 0.9380679931640625, 0.9384048461914063, 0.93968896484375, 0.9385738525390624, 0.939087890625, 0.9386076049804688, 0.93890869140625, 0.938851318359375, 0.9392803955078125, 0.93926806640625, 0.9393264770507812, 0.9389168701171875, 0.9393695068359375, 0.938694580078125, 0.9390172119140625, 0.93890966796875, 0.9383895263671875, 0.9392957153320313, 0.9389629516601562, 0.938323974609375, 0.9382389526367187, 0.9378775024414062, 0.9381263427734375, 0.9386209106445312, 0.9382277221679688, 0.9391841430664063, 0.938829833984375, 0.938335205078125, 0.9392312622070312, 0.9389691162109375, 0.9388851318359375, 0.9389680786132812, 0.9386978149414062, 0.9398670654296875, 0.9389967651367187, 0.93882470703125, 0.938935302734375, 
0.9384898681640625]",tokens/s,1.0489793487762582,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.3b,EleutherAI/pythia-1.3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.0,,,,,,MB,1610.354688,2254.962688,0.0,1608.515584,1463.6928,s,10,1.209205665588379,0.1209205665588379,0.0011005379634326184,0.1205662727355957,0.1215480598449707,0.12274063606262207,0.12369469703674317,"[0.12393321228027344, 0.12128304290771484, 0.12008969879150391, 0.12004278564453125, 0.12018284606933594, 0.12025001525878906, 0.12034620666503906, 0.12078633880615235, 0.12113209533691406, 0.121159423828125]",tokens/s,2117.0922969124094,kWh,1.4171419990441157e-06,7.762810317477228e-07,6.305967346357218e-06,8.499390377149056e-06,tokens/kWh,30119807.261502665,MB,1610.952704,2254.962688,0.0,1608.515584,1560.974848,s,10,70.14807763671874,7.0148077636718735,0.00472223733563277,7.012995361328125,7.0220806640625,7.0226084472656245,7.023030673828125,"[7.017875, 7.0128837890625, 7.0119638671875, 7.01192919921875, 7.01310693359375, 7.0105234375, 7.0078984375, 7.02196337890625, 7.01679736328125, 7.02313623046875]",tokens/s,8.981001635748731,kWh,8.273579535030184e-05,4.5345370896494765e-05,0.0003660393860852451,0.0004941205523320418,tokens/kWh,127499.25034015773,,s,629,71.1210855636597,0.11307008833650187,0.01439647456429658,0.11126681518554687,0.11167272796630859,0.11198136138916015,0.23187720581054685,"[0.11166719818115234, 0.11130470275878906, 0.11149517059326172, 0.11213107299804688, 0.11133849334716797, 0.11115827178955077, 0.11096371459960938, 0.11128832244873046, 0.11117977905273438, 0.11118182373046875, 0.11122688293457031, 0.1112647705078125, 0.11119513702392578, 0.11100774383544922, 0.111087646484375, 0.11108963012695312, 0.11133439636230469, 0.11160678100585937, 0.11127910614013672, 0.1123737564086914, 0.1114972152709961, 0.11121971130371094, 0.11113676452636718, 0.1111009292602539, 0.11132621002197265, 0.11129344177246094, 0.11116851043701172, 0.11124531555175782, 0.1112442855834961, 0.11126681518554687, 0.11126579284667969, 0.11136102294921875, 0.11114291381835938, 0.11164262390136719, 0.1114081268310547, 0.11137843322753906, 0.11116134643554687, 0.11157810974121093, 0.1113333740234375, 0.11127091217041016, 0.11125965118408203, 0.11129241943359375, 0.11120745849609374, 0.11137942504882813, 0.11125247955322265, 0.11137229156494141, 0.11129138946533203, 0.11126988983154297, 0.11214335632324218, 0.11184333038330078, 0.11213414764404298, 0.11166515350341796, 0.11144300842285157, 0.11137529754638673, 0.11148185729980468, 0.11138457489013671, 0.11156582641601563, 0.1114777603149414, 0.1114081268310547, 0.11125965118408203, 0.11127603149414063, 0.11128425598144531, 0.2332733154296875, 0.11126271820068359, 0.11134259033203125, 0.1112136001586914, 0.11113475036621094, 0.1112872314453125, 0.11119516754150391, 0.11118179321289062, 0.11123712158203125, 0.11110195159912109, 0.11118694305419922, 0.11108454132080078, 0.11117158508300781, 0.1111357421875, 0.1112248306274414, 
0.11135078430175781, 0.1113016357421875, 0.11121049499511719, 0.11146649932861329, 0.11143270111083985, 0.11139379119873047, 0.11125862121582031, 0.11111116790771484, 0.11113369750976562, 0.11115213012695313, 0.11124736022949219, 0.11156070709228516, 0.11131289672851563, 0.11127193450927735, 0.11115929412841796, 0.11136819458007813, 0.11125145721435546, 0.11122073364257813, 0.11129753875732422, 0.11136614227294922, 0.11128217315673829, 0.11125145721435546, 0.11142047882080078, 0.11116230773925781, 0.11125759887695312, 0.1110487060546875, 0.11122994995117187, 0.11132518768310547, 0.1113026885986328, 0.11126576232910156, 0.11120947265625, 0.11332198333740234, 0.11140608215332032, 0.11135590362548828, 0.11126579284667969, 0.11120845031738281, 0.11143065643310547, 0.1113917465209961, 0.11126988983154297, 0.11125350189208984, 0.11131187438964844, 0.11131903839111328, 0.11127091217041016, 0.11129446411132812, 0.11129446411132812, 0.11138969421386719, 0.1114603500366211, 0.11127808380126954, 0.23181004333496094, 0.11102105712890625, 0.1111910400390625, 0.11175424194335938, 0.11164672088623047, 0.11138969421386719, 0.11125247955322265, 0.11111219024658203, 0.11101798248291016, 0.11089920043945313, 0.11102413177490235, 0.11136723327636719, 0.1116630401611328, 0.11121561431884766, 0.11134259033203125, 0.11122585296630859, 0.11111116790771484, 0.11116339111328125, 0.11112857818603515, 0.11112242889404297, 0.11111014556884766, 0.11112966156005859, 0.11107218933105469, 0.11174297332763672, 0.11127603149414063, 0.11119821166992187, 0.11121663665771485, 0.11118080139160157, 0.11127808380126954, 0.1111583023071289, 0.11111011505126953, 0.11111014556884766, 0.11128832244873046, 0.11131903839111328, 0.1111562271118164, 0.11147058868408204, 0.11126681518554687, 0.1112279052734375, 0.11108051300048828, 0.11122681427001953, 0.11114086151123047, 0.11119923400878906, 0.11140201568603515, 0.11115004730224609, 0.1115494384765625, 0.11138969421386719, 0.11126886749267578, 0.11192524719238281, 0.111351806640625, 0.11120127868652344, 0.11122994995117187, 0.11163238525390624, 0.11214540863037109, 0.11136921691894532, 0.11118592071533204, 0.11130879974365235, 0.11120845031738281, 0.11130470275878906, 0.1112442855834961, 0.11167743682861328, 0.11148799896240234, 0.11126988983154297, 0.11132012939453124, 0.23181817626953125, 0.11116236877441406, 0.11126585388183594, 0.11114182281494141, 0.11116748809814453, 0.1111695327758789, 0.11111014556884766, 0.11164057922363281, 0.11159449768066407, 0.11120127868652344, 0.11121766662597657, 0.11119206237792968, 0.11146649932861329, 0.11107635498046875, 0.11124121856689453, 0.11109375762939454, 0.11119718170166015, 0.11117465972900391, 0.11109478759765624, 0.11113471984863281, 0.11132415771484375, 0.11108249664306641, 0.11122585296630859, 0.11124838256835938, 0.11113267517089843, 0.11119821166992187, 0.11118386840820313, 0.11122380828857421, 0.11127808380126954, 0.11117056274414062, 0.11127500915527344, 0.111388671875, 0.11147878265380859, 0.111246337890625, 0.11121868896484376, 0.1112616958618164, 0.1112811508178711, 0.11174400329589844, 0.11160575866699218, 0.11130368041992188, 0.11123001861572265, 0.11139884948730469, 0.11166207885742188, 0.11131187438964844, 0.11219455718994141, 0.11126377868652344, 0.11129955291748046, 0.11126886749267578, 0.11122278594970703, 0.11126886749267578, 0.1113169937133789, 0.11135282897949218, 0.11117362976074219, 0.1112442855834961, 0.11130060577392578, 0.11121766662597657, 0.11142655944824219, 0.111246337890625, 0.11133132934570313, 
0.11140409851074219, 0.11129542541503906, 0.11128422546386718, 0.11133952331542969, 0.23190016174316405, 0.11121459197998047, 0.11119718170166015, 0.11123302459716797, 0.11124736022949219, 0.1111551971435547, 0.1117624282836914, 0.11134259033203125, 0.11116851043701172, 0.11123609924316406, 0.11112960052490234, 0.11113983917236328, 0.11123097229003906, 0.11135692596435547, 0.11133030700683594, 0.1111756820678711, 0.11099750518798829, 0.11165388488769531, 0.11119923400878906, 0.11110399627685547, 0.11134361267089844, 0.11165695953369141, 0.11130470275878906, 0.11117874908447266, 0.11137843322753906, 0.11113881683349609, 0.11112754821777343, 0.11104460906982422, 0.11110195159912109, 0.1110456314086914, 0.11133542633056641, 0.11121971130371094, 0.11121459197998047, 0.11156275177001954, 0.11139590454101563, 0.11135379028320312, 0.11121868896484376, 0.11118592071533204, 0.11130982208251954, 0.11158528137207031, 0.11178291320800782, 0.11122176361083984, 0.11130470275878906, 0.11126681518554687, 0.111246337890625, 0.11125452423095702, 0.11126374053955078, 0.11133030700683594, 0.11145932769775391, 0.11136000061035156, 0.11132621002197265, 0.11127705383300782, 0.11152998352050782, 0.1112442855834961, 0.11130265808105469, 0.11128729248046874, 0.11133952331542969, 0.11251200103759766, 0.11131084442138672, 0.1113221435546875, 0.11128931427001953, 0.11129036712646484, 0.11130879974365235, 0.2321817626953125, 0.11112652587890624, 0.11125247955322265, 0.11120230102539062, 0.11118284606933594, 0.11110912322998047, 0.11104768371582031, 0.11113471984863281, 0.11123814392089844, 0.11114495849609375, 0.11111321258544922, 0.11121971130371094, 0.11117874908447266, 0.11118592071533204, 0.11127603149414063, 0.11113369750976562, 0.11118796539306641, 0.11104364776611328, 0.11120121765136719, 0.11109683227539062, 0.11134873962402343, 0.11129241943359375, 0.11124531555175782, 0.11129138946533203, 0.1111695327758789, 0.11111936187744141, 0.11120230102539062, 0.11098214721679688, 0.11109478759765624, 0.11121772766113282, 0.11126573181152344, 0.11151564788818359, 0.11180134582519531, 0.1111551971435547, 0.11124018859863281, 0.11106508636474609, 0.11120845031738281, 0.11125452423095702, 0.11243007659912109, 0.11165286254882813, 0.11120947265625, 0.11119308471679687, 0.11124224090576172, 0.11133747100830078, 0.1112647705078125, 0.11123200225830078, 0.11110912322998047, 0.11123097229003906, 0.11112140655517579, 0.11116134643554687, 0.11120230102539062, 0.11124838256835938, 0.11131597137451171, 0.11115929412841796, 0.11126681518554687, 0.11124326324462891, 0.11124940490722657, 0.11136819458007813, 0.1113364486694336, 0.11219558715820313, 0.11167436981201172, 0.11133542633056641, 0.11132006072998046, 0.23260365295410157, 0.1113016357421875, 0.11109069061279297, 0.11130675506591797, 0.11117772674560547, 0.11111116790771484, 0.11099852752685548, 0.11096575927734376, 0.11118592071533204, 0.11106201934814453, 0.11124940490722657, 0.1109964828491211, 0.11128729248046874, 0.11114086151123047, 0.11112041473388672, 0.11092476654052734, 0.11116646575927734, 0.11122585296630859, 0.11110297393798828, 0.11115929412841796, 0.1110282211303711, 0.11134361267089844, 0.11134054565429688, 0.11161497497558594, 0.11118796539306641, 0.11125145721435546, 0.1112965087890625, 0.11122898864746093, 0.11125344085693359, 0.11107839965820313, 0.1114081268310547, 0.11136511993408203, 0.11113164520263671, 0.11111936187744141, 0.11107635498046875, 0.11111628723144532, 0.1109964828491211, 0.11101900482177735, 0.11115827178955077, 0.11171942138671875, 
0.11166413116455078, 0.11109580993652343, 0.11116649627685547, 0.11109782409667969, 0.11119821166992187, 0.11115110778808594, 0.11119206237792968, 0.11109580993652343, 0.1113333740234375, 0.111388671875, 0.11111014556884766, 0.11112242889404297, 0.11119926452636719, 0.11117769622802734, 0.11110707092285156, 0.11125350189208984, 0.111246337890625, 0.11176140594482421, 0.11179315185546874, 0.11137229156494141, 0.1111234588623047, 0.1113917465209961, 0.11129138946533203, 0.23285554504394532, 0.11228876495361328, 0.1119969253540039, 0.11132012939453124, 0.1113087387084961, 0.1110118408203125, 0.11107635498046875, 0.11119821166992187, 0.11114905548095703, 0.11114291381835938, 0.11154227447509765, 0.111283203125, 0.11145011138916015, 0.11127091217041016, 0.11112140655517579, 0.11115110778808594, 0.11166719818115234, 0.11149517059326172, 0.11157708740234375, 0.11116441345214843, 0.11162931060791016, 0.11222220611572266, 0.11127398681640625, 0.11119721221923828, 0.11121660614013672, 0.11111833953857422, 0.11153817749023437, 0.11167436981201172, 0.11123817443847656, 0.11156681823730469, 0.11157299041748046, 0.11123200225830078, 0.11113267517089843, 0.11120339202880859, 0.11119302368164062, 0.11129446411132812, 0.11130265808105469, 0.11117874908447266, 0.11140914916992188, 0.11178803253173829, 0.11143987274169923, 0.111388671875, 0.11134054565429688, 0.11126271820068359, 0.11172557067871093, 0.11178189086914063, 0.11163648223876953, 0.11228876495361328, 0.11165491485595704, 0.11137843322753906, 0.11123712158203125, 0.11235433959960937, 0.11163645172119141, 0.11138969421386719, 0.11162419128417969, 0.11138253021240234, 0.11193548583984375, 0.11135289764404296, 0.11133433532714844, 0.11140198516845704, 0.11156582641601563, 0.11123814392089844, 0.11139584350585938, 0.23310745239257813, 0.11154329681396484, 0.11122585296630859, 0.11103334045410156, 0.11112242889404297, 0.11111321258544922, 0.11110502624511719, 0.11125759887695312, 0.11132006072998046, 0.11105689239501954, 0.11160371398925781, 0.11119308471679687, 0.11138662719726562, 0.11129039764404297, 0.11160777282714844, 0.1111203842163086, 0.11147058868408204, 0.11210137939453126, 0.11113471984863281, 0.11226214599609376, 0.11134054565429688, 0.11112960052490234, 0.11109683227539062, 0.11101696014404297, 0.11132927703857422, 0.11150438690185546, 0.11192934417724609, 0.11167231750488281, 0.11156684875488282, 0.11118489837646485, 0.11123814392089844, 0.11113683319091797, 0.11117254638671875, 0.11120333099365234, 0.11124940490722657, 0.11127398681640625, 0.11138355255126953, 0.11188531494140624, 0.11153510284423829, 0.11116134643554687, 0.11122892761230468, 0.11117874908447266, 0.11126892852783203, 0.11124422454833985, 0.11132723236083984, 0.111246337890625, 0.1125038070678711, 0.11145523071289062, 0.1111695327758789, 0.11121663665771485, 0.11127193450927735, 0.11120537567138672, 0.11170098876953125, 0.11128729248046874, 0.11114701080322266, 0.112110595703125, 0.11148185729980468, 0.11123916625976563, 0.1111910400390625, 0.11130879974365235, 0.11141222381591796, 0.11127603149414063, 0.11140608215332032, 0.23328665161132814, 0.11162009429931641, 0.11125350189208984, 0.11111116790771484, 0.11114086151123047, 0.1111234588623047, 0.11114803314208985, 0.11112454223632813, 0.11118073272705079, 0.11184435272216797, 0.11149311828613281, 0.1110835189819336, 0.11113676452636718, 0.11192729949951172, 0.11148902130126953, 0.1111900177001953, 0.1112074203491211, 0.11114393615722656, 0.11168256378173828, 0.11129446411132812, 0.11139584350585938, 
0.11125043487548827, 0.11120435333251953, 0.11126886749267578, 0.11185561370849609, 0.11130675506591797, 0.11252838134765625, 0.11186688232421875, 0.11209420776367188, 0.11134873962402343, 0.11136819458007813, 0.11132006072998046, 0.11117056274414062, 0.1115688934326172, 0.111425537109375, 0.11140096282958985, 0.11195801544189453, 0.11133439636230469, 0.11169280242919923, 0.11158016204833984, 0.11150745391845703, 0.11149209594726563, 0.11174915313720703, 0.11140297698974609, 0.11168153381347656, 0.11251609802246093, 0.11149823760986328, 0.1112125473022461, 0.11124947357177735, 0.11128211212158202, 0.11113369750976562, 0.11132518768310547, 0.11279666900634766, 0.11159654235839844, 0.11184844970703126, 0.11136102294921875, 0.11125350189208984, 0.11140096282958985, 0.11135078430175781, 0.11131084442138672, 0.11126579284667969, 0.11146348571777344, 0.111246337890625]",tokens/s,8.844071979708314,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-7b-hf,meta-llama/Llama-2-7b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2029.023232,5480.382464,0.0,4833.93536,4503.282688,s,10,5.706882629394531,0.5706882629394532,0.0016266379572605122,0.5707381896972656,0.5725719726562499,0.5726634887695312,0.5727367016601562,"[0.5727550048828125, 0.5716815185546875, 0.5690197143554687, 0.56878515625, 0.5694818725585937, 0.5685383911132813, 0.5697948608398438, 0.5724900512695312, 0.571784423828125, 0.5725516357421875]",tokens/s,448.58115476462876,kWh,6.716496138660996e-06,3.680318861643172e-06,3.126274105956043e-05,4.16595560598646e-05,tokens/kWh,6145048.680598735,MB,2029.023232,5480.382464,0.0,4833.93536,4688.699392,s,10,334.41517578125,33.441517578125,0.005471176707679699,33.439892578125,33.450382421875,33.4504509765625,33.4505058203125,"[33.44433203125, 33.4401328125, 33.436796875, 33.43383203125, 33.43801953125, 33.4503671875, 33.45051953125, 33.436765625, 33.4447578125, 33.43965234375]",tokens/s,1.8838857971329028,kWh,0.0003948281717245225,0.0002164000882931032,0.0018034092266768405,0.0024146374866944663,tokens/kWh,26090.872997355913,,s,629,339.0184661254883,0.5389800733314599,0.06780085317370947,0.5307340698242188,0.5313611816406251,0.5316872192382812,1.099913408203125,"[0.5303726196289062, 0.5304791259765625, 0.5303756713867187, 0.5306491088867188, 0.5304463500976563, 0.5305681762695312, 0.5304903564453125, 0.530681884765625, 0.5307289428710937, 0.530619384765625, 0.5308334350585937, 0.53085693359375, 0.5303705444335938, 0.5305958251953125, 0.5302774047851563, 0.5305548706054688, 0.5302968139648437, 0.530682861328125, 0.5306572875976563, 0.53058251953125, 0.5303828735351562, 0.5305906982421875, 0.53028759765625, 0.5303807983398438, 0.5304647827148438, 0.5309767456054687, 0.5310658569335938, 0.5310986328125, 0.5302282104492188, 0.5309931640625, 0.5306439819335937, 0.5309173583984375, 0.53104638671875, 0.5319270629882813, 0.531589111328125, 0.5317703857421875, 0.5316536254882812, 0.5319987182617187, 0.53172021484375, 0.53193115234375, 
0.5317007446289063, 0.5311692504882812, 0.5306941528320313, 0.5313402709960937, 0.5311119384765625, 0.5320499267578125, 0.5308436279296875, 0.5309603881835937, 0.53089794921875, 0.5311702880859375, 0.5305272216796875, 0.5305426025390625, 0.5305640869140625, 0.5306859741210938, 0.5304647827148438, 0.5306275634765625, 0.5305589599609375, 0.5307914428710937, 0.5308579711914062, 0.530808837890625, 0.5308436279296875, 0.5311334228515625, 1.1034122314453125, 0.531583984375, 0.5308897094726562, 0.5307801513671875, 0.530830322265625, 0.5305958251953125, 0.5306961669921875, 0.5304053955078125, 0.53068798828125, 0.5302650756835937, 0.5304074096679687, 0.5301115112304687, 0.5304862670898437, 0.5300991821289063, 0.5306500854492188, 0.5302988891601562, 0.5305692138671875, 0.530218994140625, 0.5305047607421876, 0.5306592407226562, 0.5311631469726562, 0.5304155883789062, 0.5312952270507812, 0.5305374755859374, 0.530766845703125, 0.5304176635742187, 0.5306275634765625, 0.5305128784179688, 0.5309767456054687, 0.5305927734375, 0.5308180541992188, 0.5308170166015626, 0.5308436279296875, 0.530608154296875, 0.5307197265625, 0.5304832153320312, 0.5310269165039062, 0.530788330078125, 0.531056640625, 0.5312041015625, 0.5311282958984375, 0.5308856201171875, 0.5310484619140625, 0.5306941528320313, 0.5308334350585937, 0.5305599975585937, 0.5322680053710938, 0.530998291015625, 0.5309849853515625, 0.5308098754882813, 0.5307525024414063, 0.53083544921875, 0.5311477661132813, 0.5306255493164063, 0.5309736938476562, 0.531083251953125, 0.5314232177734375, 0.5308733520507812, 0.531041259765625, 0.530524169921875, 0.531251220703125, 0.5304913330078125, 0.5308006591796876, 1.0999736328125, 0.530502685546875, 0.5307003173828125, 0.5304801025390625, 0.5305569458007813, 0.5305374755859374, 0.5308016357421875, 0.5303275756835938, 0.5305835571289063, 0.5303971557617188, 0.5308845825195313, 0.5308651733398437, 0.5319014282226563, 0.5306552124023437, 0.5313453979492188, 0.5305538330078124, 0.5305252075195312, 0.5303152465820312, 0.5307760620117188, 0.5310126342773438, 0.5308641357421875, 0.5306593017578125, 0.530639892578125, 0.5305968627929688, 0.5306808471679687, 0.5308436279296875, 0.5307340698242188, 0.53047705078125, 0.5306808471679687, 0.5303255004882812, 0.53110986328125, 0.5310003051757812, 0.5306552124023437, 0.5305599975585937, 0.5308487548828125, 0.5304658203125, 0.5309296875, 0.5307279663085938, 0.5307545776367187, 0.5304330444335937, 0.5307012939453125, 0.530713623046875, 0.5310167236328125, 0.5309470825195313, 0.5307689208984375, 0.5305252075195312, 0.5307125854492187, 0.5308221435546875, 0.5308416137695312, 0.530892822265625, 0.5315543212890625, 0.5305845947265625, 0.531040283203125, 0.530492431640625, 0.5305466918945313, 0.5306337280273438, 0.530735107421875, 0.5310238647460938, 0.5306429443359375, 0.5305426025390625, 0.5307340698242188, 0.530535400390625, 0.5311631469726562, 1.09973193359375, 0.53024560546875, 0.53064501953125, 0.530534423828125, 0.5306911010742188, 0.5307279663085938, 0.5305538330078124, 0.5307115478515625, 0.5306583251953125, 0.5304094848632812, 0.5305528564453125, 0.5303408813476562, 0.530830322265625, 0.5302855834960938, 0.5308221435546875, 0.5302835083007813, 0.5305282592773437, 0.5302476806640625, 0.5306634521484375, 0.5304013061523437, 0.5309204711914063, 0.5303469848632812, 0.5309839477539062, 0.5302742919921875, 0.53121435546875, 0.5306419067382813, 0.5312420043945313, 0.5304524536132813, 0.5306388549804687, 0.5301882934570312, 0.5307074584960938, 0.5305753784179688, 0.5308630981445313, 
0.530572265625, 0.5305016479492187, 0.5303705444335938, 0.5305282592773437, 0.5302476806640625, 0.5306654663085938, 0.5310330810546875, 0.531125244140625, 0.530513916015625, 0.530820068359375, 0.5313054809570312, 0.5315963134765626, 0.5308518676757813, 0.5307258911132813, 0.5309255981445312, 0.5307606811523438, 0.5307371215820312, 0.53144677734375, 0.5305640869140625, 0.5306583251953125, 0.5305569458007813, 0.53089794921875, 0.5304473876953125, 0.53081396484375, 0.5305446166992187, 0.5308446655273438, 0.5305866088867187, 0.5311211547851562, 0.5311539306640625, 0.5308313598632812, 1.1005552978515625, 0.5304330444335937, 0.5307955322265625, 0.530313232421875, 0.5304658203125, 0.5306838989257813, 0.5304944458007812, 0.5302742919921875, 0.5305692138671875, 0.530356201171875, 0.53049755859375, 0.5303019409179688, 0.5304391479492188, 0.5303101196289063, 0.5306808471679687, 0.5305149536132813, 0.5315686645507812, 0.530671630859375, 0.5310003051757812, 0.5307750244140625, 0.5311651611328125, 0.5306009521484375, 0.5306388549804687, 0.5303910522460937, 0.5305743408203125, 0.53049755859375, 0.5307238159179688, 0.5304903564453125, 0.5306675415039063, 0.5307310180664062, 0.5306060791015625, 0.5304954833984376, 0.5306849365234375, 0.5304647827148438, 0.5310084838867187, 0.531357666015625, 0.5308907470703125, 0.5313607788085938, 0.5305436401367187, 0.5303142700195312, 0.5307197265625, 0.5305866088867187, 0.5307258911132813, 0.5306911010742188, 0.530746337890625, 0.530513916015625, 0.5311181030273437, 0.5308856201171875, 0.5307627563476562, 0.5306132202148437, 0.53115185546875, 0.5308733520507812, 0.5311539306640625, 0.5306583251953125, 0.5317621459960937, 0.5305640869140625, 0.5311201171875, 0.5317539672851562, 0.5310535888671875, 0.5312788696289062, 0.5310422973632812, 0.5307166748046875, 0.5311488037109375, 1.099758544921875, 0.5304155883789062, 0.5309942016601562, 0.5306316528320313, 0.5310044555664063, 0.5309592895507812, 0.5309296875, 0.531293212890625, 0.5310361328125, 0.5315983276367188, 0.5309173583984375, 0.53089892578125, 0.5309368286132813, 0.5309173583984375, 0.5310187377929687, 0.53051904296875, 0.5308784790039063, 0.53064501953125, 0.530935791015625, 0.5305640869140625, 0.53085595703125, 0.5304842529296875, 0.5306941528320313, 0.5305200805664062, 0.5306286010742187, 0.5310453491210938, 0.5314888916015625, 0.5309019165039063, 0.5314559936523438, 0.5304483642578125, 0.5311907958984375, 0.5310607299804687, 0.53081396484375, 0.530460693359375, 0.5306798095703125, 0.5305108642578125, 0.5307340698242188, 0.5307310180664062, 0.5322465209960937, 0.5307340698242188, 0.5311016845703125, 0.5303255004882812, 0.53104541015625, 0.5311344604492187, 0.5315880737304688, 0.5312041015625, 0.5313812255859375, 0.5315963134765626, 0.5314437255859376, 0.5312255859375, 0.5312286987304687, 0.5308856201171875, 0.5311661987304688, 0.5306419067382813, 0.5310013427734375, 0.5312112426757812, 0.5314898071289063, 0.5305784301757812, 0.5307023315429688, 0.5308221435546875, 0.5309501342773437, 0.5306531982421875, 0.5310136108398438, 1.101791259765625, 0.5311979370117188, 0.5310494995117188, 0.5309296875, 0.5306951904296875, 0.5305620727539062, 0.53075146484375, 0.5304832153320312, 0.5304832153320312, 0.5304678344726562, 0.5305579223632813, 0.5303460083007813, 0.530535400390625, 0.53030810546875, 0.5306685180664062, 0.530313232421875, 0.530567138671875, 0.5305947875976562, 0.5310105590820312, 0.5304862670898437, 0.5315010375976562, 0.5307781372070313, 0.5309634399414063, 0.5304596557617187, 0.5305436401367187, 
0.5304074096679687, 0.5306552124023437, 0.5303705444335938, 0.5305252075195312, 0.5304309692382813, 0.530587646484375, 0.5305272216796875, 0.5306224365234375, 0.5303101196289063, 0.5306122436523437, 0.5303859252929688, 0.53157373046875, 0.5317867431640625, 0.5306838989257813, 0.531019775390625, 0.5308037109375, 0.5315563354492188, 0.5318829956054687, 0.5318727416992187, 0.531968017578125, 0.5313167114257813, 0.5311610717773437, 0.5309112548828125, 0.5310955810546875, 0.5309736938476562, 0.531399658203125, 0.5308375244140625, 0.5309644775390625, 0.5308323974609375, 0.5311262817382812, 0.5311344604492187, 0.5315696411132812, 0.5313095703125, 0.5322188720703125, 0.5319075927734375, 0.5321328735351563, 0.53180517578125, 0.5310341186523437, 1.1037552490234375, 0.5302538452148438, 0.53066650390625, 0.5303644409179687, 0.530545654296875, 0.5308262329101563, 0.5305938110351562, 0.5302937622070313, 0.5309501342773437, 0.5304954833984376, 0.5306388549804687, 0.5307566528320312, 0.531390380859375, 0.5309030151367188, 0.5311232299804688, 0.5304494018554687, 0.5305374755859374, 0.5301544799804687, 0.5307739868164062, 0.5302302856445312, 0.5306542358398437, 0.5302691650390625, 0.5304893188476563, 0.530271240234375, 0.5304873046875, 0.530249755859375, 0.5306429443359375, 0.5304688720703125, 0.5305733032226563, 0.5302630615234375, 0.5308067626953125, 0.5304801025390625, 0.5307535400390625, 0.5306204223632812, 0.5305897216796875, 0.5304873046875, 0.5308692626953125, 0.5305548706054688, 0.531114013671875, 0.5308733520507812, 0.531294189453125, 0.5308948364257813, 0.53117236328125, 0.5310709838867187, 0.5314703369140625, 0.5308497924804687, 0.5314765014648437, 0.530946044921875, 0.5310259399414062, 0.5308795166015625, 0.5310228271484375, 0.5311436767578125, 0.530756591796875, 0.5304422607421875, 0.5306869506835937, 0.5305169677734375, 0.53072998046875, 0.5304780883789062, 0.5309317016601562, 0.5314334716796875, 0.5309531860351563, 0.5307371215820312, 0.5311385498046876, 1.1027271728515624, 0.5304412231445312, 0.5307801513671875, 0.5308590087890624, 0.5311181030273437, 0.5311273193359375, 0.5310904541015625, 0.53079345703125, 0.5309389038085938, 0.5304013061523437, 0.5307391967773437, 0.5304248046875, 0.530746337890625, 0.5302958374023438, 0.531314697265625, 0.5305169677734375, 0.5308467407226563, 0.5305415649414063, 0.5307044067382812, 0.5304258422851562, 0.5306500854492188, 0.530608154296875, 0.5310791625976562, 0.5308784790039063, 0.5317447509765625, 0.530534423828125, 0.5315819702148438, 0.5309255981445312, 0.531177490234375, 0.53104638671875, 0.5305394897460938, 0.530318359375, 0.5304934692382812, 0.5302620239257813, 0.5305006103515625, 0.5303418579101562, 0.530535400390625, 0.5304043579101563, 0.5306634521484375, 0.5306224365234375, 0.5308016357421875, 0.5306849365234375, 0.5312051391601562, 0.53136279296875, 0.5315717163085938, 0.5321359252929687, 0.5316669311523438, 0.5309634399414063, 0.5312890625, 0.5310740356445313, 0.5311795043945312, 0.531040283203125, 0.531083251953125, 0.53062451171875, 0.5307965698242187, 0.5304965209960938, 0.5307801513671875, 0.530714599609375, 0.5308016357421875, 0.5307023315429688, 0.5310259399414062, 0.5310658569335938, 0.5317560424804687, 1.103698974609375, 0.5303838500976562, 0.5308118896484375, 0.5313546142578125, 0.5313822631835937, 0.5304340209960937, 0.5307258911132813, 0.5303450317382813, 0.530662353515625, 0.5305333862304688, 0.5306746826171875, 0.5304586181640625, 0.5306736450195313, 0.5304258422851562, 0.5309183959960937, 0.53065625, 0.5313136596679687, 
0.5311395874023438, 0.5308251953125, 0.5308211059570312, 0.530787353515625, 0.530513916015625, 0.5309204711914063, 0.5309265747070312, 0.5309603881835937, 0.5305927734375, 0.5307289428710937, 0.5303971557617188, 0.5306982421875, 0.5306787719726562, 0.531040283203125, 0.5307310180664062, 0.530882568359375, 0.5309470825195313, 0.5306675415039063, 0.5309173583984375, 0.5313720092773437, 0.5311057739257813, 0.5313310546875, 0.5306521606445312, 0.5313228759765625, 0.5306849365234375, 0.530766845703125, 0.5306306762695312, 0.5308231811523437, 0.5304586791992187, 0.5306050415039063, 0.530514892578125, 0.5307535400390625, 0.5304391479492188, 0.5307156372070313, 0.530545654296875, 0.5310863647460937, 0.5305620727539062, 0.530904052734375, 0.53064599609375, 0.5309317016601562, 0.53065625, 0.5311386108398437, 0.5306500244140625, 0.5308600463867188, 0.5306286010742187, 0.5308651733398437]",tokens/s,1.8553561615347953,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciLM-7B,,cuda,0,42,,,,,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,main,False,False,MB,1662.808064,5516.034048,0.0,4869.586944,4743.593472,s,10,6.1237835083007806,0.6123783508300781,0.001253925145814982,0.6120383911132812,0.6140244384765625,0.6141366088867187,0.6142263452148438,"[0.6138787841796876, 0.614248779296875, 0.6107066650390625, 0.61129736328125, 0.6111465454101562, 0.6114146728515625, 0.6116507568359375, 0.612426025390625, 0.61399951171875, 0.613014404296875]",tokens/s,418.0422114089963,kWh,7.21943411562178e-06,3.95425479798766e-06,3.477342651153299e-05,4.594711542514243e-05,tokens/kWh,5571622.889299289,MB,1662.808064,5516.034048,0.0,4869.586944,4769.651712,s,10,360.87087890624997,36.087087890625,0.01748383820965051,36.082986328125,36.11191953125,36.119573046875,36.125695859375,"[36.11021875, 36.1272265625, 36.08849609375, 36.07920703125, 36.0845625, 36.08481640625, 36.08141015625, 36.0650625, 36.07158203125, 36.078296875]",tokens/s,1.7457767773045123,kWh,0.00042595356252458363,0.00023346088450391426,0.0019820382702698735,0.0026414527172983716,tokens/kWh,23850.51210170259,,s,629,365.80599053955063,0.5815675525271077,0.07276675093795772,0.5726207885742187,0.5739550537109376,0.5744299926757812,1.1846730029296875,"[0.5736171264648438, 0.573517822265625, 0.5738854370117188, 0.57415576171875, 0.5742704467773437, 0.5731041259765625, 0.57312255859375, 0.5737461547851562, 0.5739632568359375, 0.5725850219726563, 0.5728665161132812, 0.5735147705078125, 0.5726494750976563, 0.573000732421875, 0.57260546875, 0.5726546020507812, 0.572073974609375, 0.5728809204101563, 0.572885986328125, 0.5725767822265625, 0.5731676025390625, 0.5745838012695312, 0.5729105834960937, 0.5730211791992188, 0.5729566650390625, 0.5723678588867187, 0.5724866333007812, 0.5726197509765625, 0.5735526123046875, 0.5740676879882812, 0.5729720458984375, 0.5732095947265625, 0.5726760864257813, 0.5724497680664062, 0.5724518432617187, 0.572790771484375, 0.5723668823242187, 0.5727159423828125, 0.5731563720703124, 
0.5741424560546875, 0.5723576049804687, 0.5724334106445312, 0.5723883666992188, 0.5724548950195313, 0.5725368041992187, 0.5722142944335937, 0.5739407348632812, 0.5740206298828125, 0.572927001953125, 0.573212646484375, 0.5730181274414062, 0.5728184204101563, 0.5730816040039063, 0.5726177368164063, 0.572484619140625, 0.5727293701171875, 0.5740543823242188, 0.5732669677734376, 0.57483056640625, 0.574424072265625, 0.5744517211914062, 0.5744885864257813, 1.18734130859375, 0.5729505004882812, 0.5725030517578125, 0.5725654907226563, 0.5738137817382812, 0.572379150390625, 0.5741465454101562, 0.574750732421875, 0.573475830078125, 0.5734666137695312, 0.5736826782226563, 0.5750292358398438, 0.5742151489257813, 0.5739274291992188, 0.5746913452148438, 0.5744281616210938, 0.5742459106445312, 0.5746903076171875, 0.5750845336914062, 0.5739735107421875, 0.5736888427734375, 0.574476318359375, 0.5740001220703125, 0.573032470703125, 0.5722992553710937, 0.57221630859375, 0.571994140625, 0.573065185546875, 0.573844482421875, 0.5739673461914062, 0.5742745361328125, 0.5743206787109375, 0.5743236694335937, 0.5738577880859375, 0.5742110595703125, 0.5753764038085938, 0.5745797119140625, 0.57402880859375, 0.5725982666015625, 0.5732260131835938, 0.5737594604492188, 0.5739530029296875, 0.5731553344726562, 0.572927978515625, 0.57257470703125, 0.5744312133789062, 0.5755975952148438, 0.5727283325195313, 0.5721149291992188, 0.572199951171875, 0.5723617553710938, 0.572105712890625, 0.57270068359375, 0.5733294067382813, 0.5723873291015625, 0.57191015625, 0.573065185546875, 0.5727866821289063, 0.5724027099609375, 0.5726883544921875, 0.5723258666992187, 0.5720657958984375, 0.5731727294921874, 1.1836395263671875, 0.5721917724609376, 0.5728533325195313, 0.5732484741210937, 0.5725409545898438, 0.5732628784179687, 0.5741773071289062, 0.5730109252929687, 0.5731737670898438, 0.573043701171875, 0.5734686889648437, 0.5732505493164063, 0.5726914672851563, 0.5729924926757812, 0.5729525756835937, 0.5732260131835938, 0.574719970703125, 0.5727836303710937, 0.5730283813476562, 0.5724508056640625, 0.5733519287109375, 0.5728767700195313, 0.5725736694335938, 0.5743431396484375, 0.5745357055664062, 0.573507568359375, 0.572695556640625, 0.5729177856445312, 0.573022216796875, 0.5733980102539062, 0.5731030883789062, 0.5730037841796874, 0.572927978515625, 0.5738895263671875, 0.573137939453125, 0.57310107421875, 0.5718549194335938, 0.5721885986328125, 0.57208935546875, 0.5725255737304688, 0.5722398681640625, 0.5733836669921875, 0.5725736694335938, 0.57238525390625, 0.5722880249023438, 0.5720064086914063, 0.5721456909179687, 0.57385986328125, 0.5721773681640625, 0.5720176391601562, 0.5720484008789063, 0.5730570068359375, 0.5720934448242188, 0.5720852661132813, 0.5719961547851562, 0.5719992065429688, 0.5727928466796876, 0.5721221313476562, 0.5721630859375, 0.572600341796875, 0.5723125610351563, 0.5724631958007812, 0.5727794799804687, 1.1843287353515626, 0.572779541015625, 0.57261669921875, 0.5734041748046875, 0.5739274291992188, 0.57350146484375, 0.57328125, 0.5731553344726562, 0.5729392700195313, 0.5729403076171875, 0.5732833251953126, 0.57289013671875, 0.5743707885742187, 0.5724887084960938, 0.5720043334960937, 0.5718681640625, 0.5722941284179688, 0.5721036987304687, 0.5721763916015625, 0.57200537109375, 0.5717247924804687, 0.572310546875, 0.5729740600585937, 0.5724682006835937, 0.5719664916992188, 0.5723975830078125, 0.5722286376953125, 0.5723289794921875, 0.572221435546875, 0.5728389282226563, 0.5737000732421875, 0.5728737182617187, 
0.5730037841796874, 0.572178466796875, 0.5720411376953125, 0.5717974853515625, 0.5721036987304687, 0.5720719604492187, 0.5719695434570312, 0.5730037841796874, 0.5722941284179688, 0.5718763427734375, 0.5725706176757812, 0.5727713012695312, 0.5752719116210937, 0.5729822998046875, 0.5730355224609375, 0.5741107177734375, 0.5727109375, 0.5726207885742187, 0.5727815551757812, 0.5729136352539063, 0.572896240234375, 0.572600341796875, 0.5730191650390625, 0.5724456787109375, 0.573222900390625, 0.57379736328125, 0.5722828979492187, 0.572052490234375, 0.5719766845703125, 0.5719429321289062, 0.5717380981445312, 1.1848990478515624, 0.5739724731445313, 0.5733632202148438, 0.5733693237304688, 0.5734880981445313, 0.5738117065429688, 0.5729403076171875, 0.5730672607421875, 0.5729863891601562, 0.5730140380859375, 0.57394482421875, 0.5726064453125, 0.5722265625, 0.5720391845703126, 0.5731246337890625, 0.5720433349609375, 0.57220703125, 0.5742510375976563, 0.5736365966796875, 0.572663818359375, 0.5723699340820313, 0.5724794921875, 0.5720340576171875, 0.5722705688476563, 0.5724047241210938, 0.57236376953125, 0.5725368041992187, 0.5725255737304688, 0.5728256225585937, 0.5726392211914062, 0.5721354370117188, 0.5721804809570312, 0.5721774291992188, 0.5723484497070312, 0.5721793823242187, 0.572990478515625, 0.5730048217773438, 0.5721558837890625, 0.572242919921875, 0.5721978759765625, 0.5722470703125, 0.5720596313476562, 0.5724047241210938, 0.5724436645507812, 0.5722009887695313, 0.5737861328125, 0.57619970703125, 0.5726668701171875, 0.5722962036132813, 0.5730099487304687, 0.5725439453125, 0.5726085205078125, 0.5728368530273438, 0.5743206176757812, 0.5726044311523437, 0.5726791381835937, 0.5723678588867187, 0.5721937866210938, 0.572822509765625, 0.5727109375, 0.5722070922851562, 0.5722654418945312, 0.5724968872070313, 1.1851029052734374, 0.5723607177734376, 0.57244775390625, 0.5727620849609375, 0.5731502075195313, 0.5729341430664062, 0.5733734130859375, 0.5732301025390625, 0.572663818359375, 0.5735577392578125, 0.5724036865234375, 0.5722737426757812, 0.5723555297851562, 0.5722726440429687, 0.5723494262695312, 0.5725409545898438, 0.5730109252929687, 0.57223681640625, 0.5723402099609375, 0.5722613525390625, 0.572516357421875, 0.5727579956054687, 0.5724467163085938, 0.572221435546875, 0.5731195068359375, 0.5724169921875, 0.5723781127929688, 0.5720811767578124, 0.5718343505859375, 0.5718425903320312, 0.57302734375, 0.5730027465820312, 0.5725501708984375, 0.5730252685546875, 0.57335498046875, 0.5724067993164063, 0.5720145874023438, 0.5724354858398437, 0.5753599853515625, 0.572169189453125, 0.5720125732421875, 0.5726105346679687, 0.5732147216796875, 0.57269970703125, 0.5723483276367187, 0.572010498046875, 0.5722890014648437, 0.5727958984375, 0.5723873901367188, 0.57226953125, 0.5728460693359375, 0.57364892578125, 0.5736980590820312, 0.5732618408203125, 0.5729075317382812, 0.5730396118164063, 0.5730027465820312, 0.5737277221679687, 0.5737984008789062, 0.574740478515625, 0.5733519287109375, 0.5725675659179688, 0.572537841796875, 1.184806884765625, 0.5725839233398438, 0.5726617431640625, 0.573106201171875, 0.5745018920898437, 0.5735618286132812, 0.573259765625, 0.5744578857421875, 0.5749258422851562, 0.5728265991210938, 0.5725081787109375, 0.5727150268554687, 0.5735505981445312, 0.573075439453125, 0.5722562255859375, 0.57212109375, 0.5720760498046875, 0.5723033447265625, 0.5736908569335938, 0.5737195434570312, 0.5725010375976562, 0.57371337890625, 0.5728123168945313, 0.5738147583007812, 0.571978759765625, 
0.5718763427734375, 0.5724252319335937, 0.5723299560546875, 0.5721712646484375, 0.5726679077148438, 0.57254296875, 0.5723995971679687, 0.5719193725585937, 0.5722777709960938, 0.571779052734375, 0.5716900024414062, 0.5719306030273438, 0.5717493896484375, 0.571821044921875, 0.5734307861328125, 0.5732904663085937, 0.5728604125976563, 0.572748779296875, 0.57318603515625, 0.5727365112304688, 0.5732413330078125, 0.57249072265625, 0.5729822998046875, 0.573169677734375, 0.5721641235351562, 0.5722808227539062, 0.5722900390625, 0.57253173828125, 0.5722101440429688, 0.5723197631835938, 0.5721200561523437, 0.5727498168945313, 0.5729525756835937, 0.5725030517578125, 0.5723596801757812, 0.5721190185546875, 0.5728788452148438, 0.57232177734375, 1.18618115234375, 0.5729382934570313, 0.5724763793945312, 0.5728051147460937, 0.572410888671875, 0.5722828979492187, 0.5726187744140625, 0.57223681640625, 0.5720278930664062, 0.5722142944335937, 0.5735720825195313, 0.5724139404296875, 0.5722695922851563, 0.5720924072265625, 0.572410888671875, 0.5720698852539062, 0.5738741455078125, 0.5721026611328125, 0.5729136352539063, 0.5728265991210938, 0.5724375, 0.5722224731445312, 0.57202587890625, 0.573212646484375, 0.572484619140625, 0.5720780639648437, 0.57230859375, 0.57369384765625, 0.5728798828125, 0.572020751953125, 0.5721231079101563, 0.5717554931640625, 0.572156982421875, 0.5722654418945312, 0.5718435668945312, 0.5726340942382813, 0.573137939453125, 0.5722542114257813, 0.5726156616210938, 0.572095458984375, 0.572031982421875, 0.5723658447265625, 0.5724303588867188, 0.5722726440429687, 0.5725307006835938, 0.5731655883789063, 0.5721272583007813, 0.5720043334960937, 0.572042236328125, 0.572242919921875, 0.5721856079101563, 0.5724548950195313, 0.5724661865234375, 0.5730764770507812, 0.5732301025390625, 0.5725450439453125, 0.5724866333007812, 0.5721734008789062, 0.5719572143554688, 0.5721549072265625, 0.572000244140625, 0.5720064086914063, 0.5725757446289063, 1.1873709716796874, 0.5724487915039063, 0.5732085571289063, 0.572284912109375, 0.5727989501953125, 0.5728409423828125, 0.5729740600585937, 0.573497314453125, 0.572516357421875, 0.572705810546875, 0.5721886596679687, 0.5721958618164062, 0.5724036865234375, 0.5723381958007813, 0.57270068359375, 0.5734564208984375, 0.5733170776367188, 0.5739089965820312, 0.5724610595703125, 0.5728818969726562, 0.5730293579101563, 0.57270068359375, 0.572495849609375, 0.5729935302734375, 0.5724405517578125, 0.572822509765625, 0.5726105346679687, 0.572315673828125, 0.5717718505859375, 0.5719408569335938, 0.5722603759765625, 0.5722347412109375, 0.5724713134765625, 0.5740123901367188, 0.5722296142578125, 0.5719521484375, 0.572314697265625, 0.572317626953125, 0.5720811767578124, 0.5719705810546875, 0.5717545166015625, 0.5723504638671875, 0.5726515502929688, 0.5724016723632812, 0.5723504638671875, 0.572231689453125, 0.572205078125, 0.5721190185546875, 0.572031982421875, 0.5724467163085938, 0.573043701171875, 0.5730468139648438, 0.5726412353515625, 0.5725030517578125, 0.5725399169921875, 0.5724713134765625, 0.5720606689453125, 0.5723914184570312, 0.5728767700195313, 0.573179931640625, 0.5726597290039063, 0.572295166015625, 0.5719317016601563, 1.1857969970703126, 0.5727313842773437, 0.572368896484375, 0.572347412109375, 0.5732669677734376, 0.5725573120117188, 0.5721907348632812, 0.5723095092773437, 0.5727498168945313, 0.5725081787109375, 0.5725491333007813, 0.5726720581054687, 0.5733457641601563, 0.5733345336914063, 0.5721354370117188, 0.5723043823242188, 0.5719183349609375, 
0.5717770385742188, 0.5724968872070313, 0.5726105346679687, 0.573053955078125, 0.5741567993164063, 0.5731102905273437, 0.5720698852539062, 0.5720145874023438, 0.57215185546875, 0.574278564453125, 0.5722521362304688, 0.5720453491210937, 0.5730723266601563, 0.5723924560546875, 0.5727354736328125, 0.572337158203125, 0.5723197631835938, 0.5723668212890625, 0.572221435546875, 0.5729075317382812, 0.5726556396484375, 0.5724456787109375, 0.5738014526367188, 0.5731779174804688, 0.572559326171875, 0.5726812133789062, 0.5728235473632812, 0.572885986328125, 0.5726618041992187, 0.5722745971679688, 0.5731082153320313, 0.5732781982421875, 0.5720135498046875, 0.5721507568359375, 0.57227880859375, 0.5728726806640625, 0.572346435546875, 0.5726781005859375, 0.5728286743164063, 0.5733345336914063, 0.5729228515625, 0.572737548828125, 0.5724548950195313, 0.5741793212890625, 0.5722265625, 0.572494873046875]",tokens/s,1.7194907034525255,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-0.5B,Qwen/Qwen1.5-0.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1440.489472,1709.703168,0.0,1063.256064,942.605312,s,10,0.8710727081298828,0.08710727081298827,0.0020526611434407644,0.0869946403503418,0.08830768203735351,0.09043918418884277,0.09214438591003418,"[0.09257068634033203, 0.08689478302001953, 0.08709449768066406, 0.08734588623046875, 0.0876402587890625, 0.08528950500488282, 0.08547952270507812, 0.08543309020996094, 0.08549046325683594, 0.08783401489257812]",tokens/s,2938.9050719957663,kWh,1.0307077509193605e-06,5.647778186555472e-07,2.6262726324173835e-06,4.221758201992291e-06,tokens/kWh,60638243.06166823,MB,1440.817152,1709.703168,0.0,1063.256064,942.607872,s,10,54.10364501953125,5.410364501953125,0.04442393039305028,5.402393798828125,5.47523671875,5.4771017578125,5.478593789062501,"[5.402685546875, 5.4450302734375, 5.474822265625, 5.478966796875, 5.39001171875, 5.366296875, 5.3479423828125, 5.3592861328125, 5.4365009765625, 5.40210205078125]",tokens/s,11.64431712082563,kWh,6.414297001805283e-05,3.5154539095709204e-05,0.00015373626912418487,0.00025303377823794696,tokens/kWh,248978.61636779693,,s,629,54.788874206542985,0.08710472846827182,0.01042761753839409,0.08542822265625,0.08754749298095703,0.08829050750732421,0.17079922241210937,"[0.08673382568359375, 0.08457215881347656, 0.08464179229736328, 0.08488550567626953, 0.08813362884521485, 0.08856473541259766, 0.08671949005126953, 0.08648601531982422, 0.08687923431396484, 0.08737894439697266, 0.08506470489501954, 0.0844554214477539, 0.0844400634765625, 0.08486911773681641, 0.08474524688720703, 0.08481378936767578, 0.08675328063964843, 0.08531455993652344, 0.08472268676757813, 0.08468172454833985, 0.08468172454833985, 0.0845322265625, 0.08479436492919921, 0.08471552276611329, 0.08475545501708984, 0.08474521636962891, 0.08502886199951172, 0.0846899871826172, 0.0848424301147461, 0.084569091796875, 0.08475955200195312, 0.08495104217529297, 0.08461721801757813, 0.08463571166992187, 0.08458643341064453, 
0.08476876831054687, 0.08473804473876953, 0.08473190307617187, 0.0844554214477539, 0.08442777252197266, 0.08484761810302735, 0.08466124725341796, 0.08562483215332031, 0.08599142456054687, 0.08450764465332031, 0.0840970230102539, 0.08538521575927735, 0.08838041687011719, 0.0868485107421875, 0.08694374084472656, 0.08736255645751953, 0.08686080169677735, 0.08700518035888671, 0.08699289703369141, 0.08753254699707032, 0.0868485107421875, 0.08696627044677735, 0.08728985595703125, 0.08770150756835937, 0.08642969512939454, 0.08510566711425781, 0.08681779479980468, 0.1740953674316406, 0.08652799987792968, 0.08564736175537109, 0.0842209243774414, 0.08706150054931641, 0.08500326538085938, 0.08664473724365235, 0.08596275329589843, 0.0853391342163086, 0.08664268493652344, 0.08708096313476563, 0.08690073394775391, 0.08696934509277343, 0.08694783782958984, 0.08664575958251954, 0.08597196960449219, 0.08684748840332031, 0.08683110046386719, 0.08509439849853516, 0.08638566589355469, 0.08632217407226563, 0.08716287994384765, 0.087119873046875, 0.08713215637207031, 0.08506470489501954, 0.08651168060302734, 0.08632109069824219, 0.08708403015136719, 0.08612662506103516, 0.08650748443603516, 0.08707481384277344, 0.08641535949707031, 0.08600678253173828, 0.08589107513427735, 0.08514252471923828, 0.08699494171142579, 0.08681676483154296, 0.08650956726074219, 0.0871383056640625, 0.08712806701660156, 0.08630374145507813, 0.08593612670898437, 0.08639590454101563, 0.08506681823730469, 0.08687712097167968, 0.08704512023925781, 0.08696115112304688, 0.08838451385498047, 0.08482816314697265, 0.08464895629882813, 0.08431206512451171, 0.08453427124023437, 0.08795238494873046, 0.08794931030273437, 0.08684134674072265, 0.08686592102050782, 0.08699801635742188, 0.08754073333740234, 0.087947265625, 0.08687513732910156, 0.08657100677490234, 0.08599350738525391, 0.08582653045654297, 0.17401548767089844, 0.0865423355102539, 0.08694374084472656, 0.08704204559326172, 0.08744652557373046, 0.08659967803955078, 0.08703897857666015, 0.08700109100341796, 0.08715776062011718, 0.08681574249267578, 0.08705433654785157, 0.08619929504394531, 0.08701548767089844, 0.086857666015625, 0.08725811004638671, 0.08656588745117187, 0.08733184051513672, 0.0871731185913086, 0.08719257354736328, 0.08709120178222657, 0.0869713897705078, 0.0868485107421875, 0.08620236968994141, 0.08700415802001953, 0.08701337432861328, 0.0868331527709961, 0.08713420867919922, 0.08696627044677735, 0.08672972869873047, 0.08645836639404297, 0.08661094665527344, 0.08728678131103515, 0.08693452453613282, 0.08721206665039062, 0.08688022613525391, 0.08789299011230468, 0.08672870635986328, 0.08694477081298828, 0.087731201171875, 0.08747007751464844, 0.08717005157470703, 0.08711270141601563, 0.08729190063476562, 0.08698271942138672, 0.08669484710693359, 0.08654745483398438, 0.08669286346435547, 0.08840806579589844, 0.08711475372314453, 0.08676044464111328, 0.08631705474853515, 0.08654956817626953, 0.08599648284912109, 0.08647679901123047, 0.08694989013671875, 0.08696524810791016, 0.08698060607910156, 0.08548556518554687, 0.08556953430175782, 0.0868823013305664, 0.08658534240722657, 0.08709939575195312, 0.08710451507568359, 0.17466163635253906, 0.08715673828125, 0.08728371429443359, 0.08680345916748047, 0.08702361297607422, 0.0872099838256836, 0.08695299530029296, 0.0865228500366211, 0.08702054595947266, 0.08711270141601563, 0.08681267547607421, 0.0862402572631836, 0.08722022247314454, 0.08714342498779297, 0.08641433715820312, 0.08614297485351563, 0.08621363067626953, 
0.08743526458740235, 0.0874260482788086, 0.08645123291015624, 0.08728675079345703, 0.08692940521240235, 0.08707891082763672, 0.08719055938720703, 0.08709014129638672, 0.08714854431152344, 0.08714035034179687, 0.08686592102050782, 0.0871751708984375, 0.08751411437988281, 0.08707379150390625, 0.08701952362060547, 0.08471449279785156, 0.08613683319091797, 0.08813772583007813, 0.08714240264892578, 0.08702566528320313, 0.0872959976196289, 0.08773222351074218, 0.08883302307128907, 0.08757759857177734, 0.08561151885986328, 0.08676659393310547, 0.08834764862060547, 0.08476467132568359, 0.08778854370117188, 0.08729497528076172, 0.08688441467285156, 0.08712287902832032, 0.0869744644165039, 0.0868136978149414, 0.08646246337890626, 0.08621875, 0.08678707122802734, 0.08692428588867188, 0.08732262420654296, 0.08703692626953125, 0.08800665283203125, 0.08684236907958984, 0.08649215698242188, 0.0869048309326172, 0.08684236907958984, 0.08680857849121094, 0.1740042266845703, 0.08717619323730469, 0.0870635528564453, 0.0868219223022461, 0.08698159790039063, 0.08682291412353516, 0.08556953430175782, 0.08729804992675781, 0.08690278625488282, 0.08721305847167969, 0.08680857849121094, 0.08707686614990234, 0.08708812713623047, 0.0862003173828125, 0.08698265838623047, 0.08703385925292968, 0.08684031677246094, 0.08691814422607422, 0.08724281311035156, 0.08704608154296875, 0.08688742065429687, 0.08603648376464844, 0.08850739288330078, 0.08509849548339844, 0.08658636474609376, 0.08696934509277343, 0.08461007690429688, 0.08453526306152344, 0.08452095794677734, 0.08469811248779296, 0.084959228515625, 0.08464383697509766, 0.08490086364746094, 0.08458649444580078, 0.08620851135253907, 0.08480665588378906, 0.08471449279785156, 0.08450867462158203, 0.08490086364746094, 0.0845486068725586, 0.08454962921142578, 0.08446873474121094, 0.0846909408569336, 0.0845506591796875, 0.08456806182861328, 0.08460185241699218, 0.08472268676757813, 0.08459366607666016, 0.08461414337158203, 0.0848005142211914, 0.08458854675292969, 0.08446669006347657, 0.08458854675292969, 0.08470323181152344, 0.08453427124023437, 0.08449433898925782, 0.08460192108154296, 0.08460384368896484, 0.08456192016601563, 0.08444825744628906, 0.08477184295654297, 0.0844421157836914, 0.08439193725585938, 0.17255935668945313, 0.08706764984130859, 0.0871720962524414, 0.0869908447265625, 0.08572518157958985, 0.08475033569335938, 0.0848353271484375, 0.08454962921142578, 0.08473804473876953, 0.08482406616210937, 0.08465305328369141, 0.08468479919433594, 0.08473395538330078, 0.08460902404785156, 0.08453529357910156, 0.08463053131103515, 0.0845455322265625, 0.08432128143310547, 0.08473190307617187, 0.08471449279785156, 0.084578369140625, 0.08457823944091797, 0.08466944122314453, 0.08652288055419922, 0.08498278045654296, 0.08480255889892578, 0.08473702239990234, 0.08468787384033204, 0.08633650970458985, 0.0850851821899414, 0.08458751678466797, 0.08468991851806641, 0.08474726104736328, 0.0846909408569336, 0.0885032958984375, 0.08480870056152344, 0.0858071060180664, 0.08694374084472656, 0.08694989013671875, 0.08657920074462891, 0.08678604888916015, 0.08691814422607422, 0.084853759765625, 0.08535858917236328, 0.08599654388427734, 0.08458137512207031, 0.08471347045898438, 0.084495361328125, 0.08461516571044922, 0.0845322265625, 0.08443698883056641, 0.0841891860961914, 0.08461619567871094, 0.0845137939453125, 0.0844031982421875, 0.08491827392578125, 0.08468889617919922, 0.08446873474121094, 0.08431718444824218, 0.0844595489501953, 0.08526640319824219, 0.08521421051025391, 
0.0846397476196289, 0.1709055938720703, 0.08506777954101563, 0.08515379333496094, 0.08487731170654297, 0.08472576141357421, 0.08464383697509766, 0.08466124725341796, 0.08488448333740234, 0.08489778900146484, 0.08490598297119141, 0.08465203094482422, 0.0848189468383789, 0.08532991790771484, 0.08477286529541016, 0.08459468841552735, 0.08518246459960938, 0.08490803527832032, 0.0846561279296875, 0.08462028503417969, 0.08444313812255859, 0.0862208023071289, 0.08462643432617188, 0.08517222595214843, 0.08487423706054688, 0.08629145812988281, 0.08506470489501954, 0.0851937255859375, 0.08485785675048828, 0.08452505493164063, 0.0856289291381836, 0.08493059539794921, 0.08512406158447265, 0.08508620452880859, 0.08450457763671874, 0.084515869140625, 0.08459465789794922, 0.08446975708007813, 0.08475852966308593, 0.08493772888183594, 0.08463161468505859, 0.08454649353027344, 0.0845322265625, 0.08463667297363281, 0.08475753784179688, 0.08451376342773438, 0.08450662231445312, 0.08457318115234375, 0.08449842834472657, 0.08456294250488282, 0.08457523345947265, 0.08489676666259766, 0.08472576141357421, 0.087193603515625, 0.08506368255615235, 0.08472064208984376, 0.08463565063476562, 0.08460390472412109, 0.08499712371826172, 0.08570368194580077, 0.08466124725341796, 0.08457625579833984, 0.08450355529785156, 0.08474214172363281, 0.17052569580078125, 0.0847298583984375, 0.08458444976806641, 0.08468275451660157, 0.08454041290283203, 0.08447081756591797, 0.08448406219482422, 0.08457523345947265, 0.0845301742553711, 0.0844247055053711, 0.08477286529541016, 0.08456502532958984, 0.08428745269775391, 0.08627097320556641, 0.08500633239746094, 0.08606105804443359, 0.08684646606445312, 0.08513126373291016, 0.08685874938964844, 0.08501042938232421, 0.08475545501708984, 0.0846346206665039, 0.08462438201904297, 0.08665497589111328, 0.08509542083740235, 0.08484454345703125, 0.08662322998046874, 0.08476467132568359, 0.08460492706298828, 0.0845998077392578, 0.08459878540039062, 0.08473702239990234, 0.08481484985351563, 0.08530022430419922, 0.08468275451660157, 0.08468685150146485, 0.08445645141601563, 0.08475548553466797, 0.08516194915771484, 0.08527359771728515, 0.08474931335449219, 0.08479542541503907, 0.08579888153076172, 0.08668876647949218, 0.08599346923828124, 0.08607129669189453, 0.08541696166992188, 0.08659865570068359, 0.08641024017333984, 0.08462847900390626, 0.08492339324951172, 0.0848875503540039, 0.084674560546875, 0.08466534423828125, 0.08485273742675781, 0.08462643432617188, 0.08465408325195313, 0.08468479919433594, 0.08457011413574218, 0.0847984619140625, 0.08467558288574219, 0.08471142578125, 0.08483334350585937, 0.1702665557861328, 0.08738406372070312, 0.08923648071289063, 0.08826573181152343, 0.08826470184326173, 0.08866099548339844, 0.08817049407958985, 0.08779468536376953, 0.08849612426757812, 0.08879206085205078, 0.08808345794677734, 0.08809164428710937, 0.0881233901977539, 0.08886784362792968, 0.08884019470214843, 0.08862515258789062, 0.08821043395996093, 0.08844905853271484, 0.08820121765136718, 0.08817353820800782, 0.08852275085449218, 0.08854227447509766, 0.08476563262939453, 0.0872243194580078, 0.08712806701660156, 0.08852275085449218, 0.08827391815185547, 0.08493363189697266, 0.08474214172363281, 0.08446873474121094, 0.08490598297119141, 0.08460697937011719, 0.08487014770507813, 0.08471858978271485, 0.08479743957519531, 0.0852490234375, 0.08817971038818359, 0.0883599395751953, 0.08766566467285156, 0.0848189468383789, 0.08454348754882812, 0.08476876831054687, 0.08462950134277344, 0.08455782318115235, 
0.0845998077392578, 0.08457523345947265, 0.08428851318359375, 0.08459468841552735, 0.08465920257568359, 0.08506470489501954, 0.08472268676757813, 0.08413491058349609, 0.08476467132568359, 0.08465715026855469, 0.08452915191650391, 0.08448102569580078, 0.0847267837524414, 0.08448614501953125, 0.08460594940185547, 0.08444416046142578, 0.08624127960205077, 0.08508006286621093, 0.08459878540039062, 0.1760184326171875, 0.08723353576660156, 0.08769331359863282, 0.08806092834472656, 0.08757453155517578, 0.08732978820800781, 0.08919449615478516, 0.08813977813720703, 0.08797286224365235, 0.08746189117431641, 0.08733798217773438, 0.08667750549316407, 0.08574054718017578, 0.08622182464599609, 0.08452095794677734, 0.08488345336914062, 0.08462950134277344, 0.08516403198242188, 0.08760736083984375, 0.08629344177246094, 0.08519782257080079, 0.08494694519042968, 0.08468275451660157, 0.08495308685302734, 0.08589212799072266, 0.08511488342285156, 0.08629449462890625, 0.08508927917480469, 0.08481075286865235, 0.08436326599121094, 0.08498995208740234, 0.08503705596923829, 0.08542822265625, 0.08469503784179687, 0.08443392181396485, 0.08501554870605468, 0.0846376953125, 0.08496742248535157, 0.08612351989746093, 0.08490290832519531, 0.08461516571044922, 0.08477081298828125, 0.0848005142211914, 0.08621977233886718, 0.08505651092529297, 0.08459571075439454, 0.08459878540039062, 0.08486918640136719, 0.08474515533447266, 0.08450969696044922, 0.08503501129150391, 0.0847267837524414, 0.08468889617919922, 0.08508927917480469, 0.0886118392944336, 0.08830156707763671, 0.08738508605957031, 0.08487321472167969, 0.0855920639038086, 0.08490496063232422, 0.08478208160400391, 0.08481689453125, 0.08506368255615235]",tokens/s,11.480433009606973,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,0,0,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/0/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( 
File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a341f-5cf6b6431fab86f8674dbfaf;4c25d773-1e82-4cd0-a5bc-f374af5a3e19) Repository Not Found for url: https://huggingface.co/0/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 0 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-70b-hf,meta-llama/Llama-2-70b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights 
self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,2,2,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/2/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a32c3-371e02a51b1b78b24cf6bbbc;5eb24f73-8292-422f-aad0-625415d80e62) Repository Not Found for url: https://huggingface.co/2/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 2 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-2.7b,facebook/opt-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2109.853696,2844.2624,0.0,2197.815296,1927.351296,s,10,2.3800795135498047,0.23800795135498043,0.0010254107368052306,0.23830428314208985,0.23930529022216795,0.23934926986694335,0.23938445358276367,"[0.2383564453125, 0.23939324951171875, 0.23619731140136718, 0.23845465087890624, 0.23723452758789063, 0.23663900756835937, 0.23750480651855468, 0.2382521209716797, 0.2387518768310547, 0.23929551696777343]",tokens/s,1075.594317511624,kWh,2.793223439723023e-06,1.5305452026781454e-06,1.2162554949675677e-05,1.6486323592076845e-05,tokens/kWh,15528022.276781643,MB,2109.853696,2844.2624,0.0,2197.815296,2031.97184,s,10,139.4725029296875,13.94725029296875,0.016539028053013265,13.941937500000002,13.97250703125,13.977812109375,13.982056171875,"[13.953560546875, 13.945388671875, 13.94201171875, 13.9831171875, 13.9309609375, 13.9364580078125, 13.92993359375, 13.94186328125, 13.971328125, 
13.937880859375]",tokens/s,4.517019389245513,kWh,0.000164597820511391,9.021296607492203e-05,0.0007178143901425277,0.0009726251767288406,tokens/kWh,64773.153633430826,,s,629,141.39150625610358,0.2247877682926924,0.028297149074530483,0.2211778564453125,0.22228438720703125,0.22277959289550783,0.45843198486328124,"[0.22260429382324218, 0.22169497680664063, 0.2212351989746094, 0.22115533447265626, 0.22176870727539064, 0.22137344360351563, 0.22118502807617188, 0.22141644287109374, 0.22102528381347655, 0.22195199584960937, 0.2219018249511719, 0.22150553894042968, 0.22119935607910157, 0.2213509063720703, 0.22125465393066407, 0.22120550537109376, 0.22150962829589843, 0.2213017578125, 0.22201548767089843, 0.2213632049560547, 0.22165196228027345, 0.22151373291015625, 0.22148095703125, 0.22154649353027345, 0.22147174072265624, 0.22180044555664064, 0.22122802734375, 0.22110105895996093, 0.22116249084472656, 0.221444091796875, 0.22163456726074218, 0.22287872314453125, 0.2216417236328125, 0.222202880859375, 0.2216407012939453, 0.22104165649414062, 0.22102323913574218, 0.22114508056640625, 0.22116044616699218, 0.22107955932617188, 0.2210508728027344, 0.22112460327148437, 0.22108979797363282, 0.2215557098388672, 0.22159564208984375, 0.22146662902832032, 0.2216785888671875, 0.22263194274902343, 0.22135398864746095, 0.22125056457519532, 0.22187930297851563, 0.22126797485351563, 0.2213396453857422, 0.22115225219726561, 0.22118502807617188, 0.22127410888671875, 0.22127104187011717, 0.22109901428222656, 0.22111744689941407, 0.22118911743164063, 0.2214871063232422, 0.22157005310058595, 0.46144613647460936, 0.22118707275390626, 0.2218403778076172, 0.2209945526123047, 0.22095564270019533, 0.22111538696289063, 0.22113792419433595, 0.22112973022460938, 0.22129766845703125, 0.22095974731445311, 0.22107244873046875, 0.2212720031738281, 0.22100991821289062, 0.22110617065429689, 0.22098739624023436, 0.22138470458984374, 0.22130073547363283, 0.22108773803710938, 0.22093721008300782, 0.2209566650390625, 0.2209310760498047, 0.2208204803466797, 0.22089932250976563, 0.22095155334472658, 0.22101708984375, 0.22186495971679687, 0.22131610107421876, 0.22127513122558592, 0.22142361450195314, 0.22218751525878908, 0.2213519287109375, 0.2209669189453125, 0.22220083618164063, 0.2210744323730469, 0.22159461975097655, 0.22113792419433595, 0.22117990112304686, 0.2222161865234375, 0.22108262634277343, 0.2210293731689453, 0.22102117919921874, 0.22107955932617188, 0.22112562561035157, 0.22216499328613282, 0.22140518188476563, 0.22183526611328125, 0.22283879089355468, 0.22166937255859376, 0.2215045166015625, 0.22186904907226562, 0.22122700500488282, 0.2214256591796875, 0.22139903259277344, 0.2215004119873047, 0.22137753295898438, 0.22115737915039063, 0.22106419372558594, 0.22163967895507813, 0.22111334228515625, 0.22137753295898438, 0.2212833251953125, 0.2219683837890625, 0.22127923583984374, 0.45813043212890625, 0.22103347778320312, 0.2213949432373047, 0.22112973022460938, 0.22099250793457031, 0.22118502807617188, 0.2218219451904297, 0.22100376892089843, 0.221154296875, 0.22091162109375, 0.22253260803222658, 0.22119935607910157, 0.2210529327392578, 0.22278041076660157, 0.22130482482910158, 0.22150553894042968, 0.2213519287109375, 0.22121983337402343, 0.2217584686279297, 0.22111436462402342, 0.22102015686035156, 0.22090444946289062, 0.22078054809570313, 0.2211778564453125, 0.22104576110839844, 0.2211420135498047, 0.22199909973144533, 0.22127308654785155, 0.22181170654296875, 0.2217164764404297, 0.2210918426513672, 0.2213201904296875, 
0.22124339294433593, 0.2210150451660156, 0.22111231994628905, 0.22106008911132813, 0.22112051391601562, 0.22103245544433595, 0.22106419372558594, 0.2211778564453125, 0.22102323913574218, 0.22111846923828124, 0.22116146850585938, 0.2212884521484375, 0.22095872497558594, 0.22103347778320312, 0.221048828125, 0.2216294403076172, 0.22115122985839844, 0.22118400573730468, 0.221127685546875, 0.22102117919921874, 0.22113177490234376, 0.22128025817871094, 0.2211031036376953, 0.22143180847167968, 0.22129254150390626, 0.22108773803710938, 0.22222642517089844, 0.22198886108398438, 0.2217205810546875, 0.2217574462890625, 0.22159461975097655, 0.4614256591796875, 0.22283775329589844, 0.22295756530761718, 0.22274969482421875, 0.2225858612060547, 0.22346035766601563, 0.22301695251464843, 0.22341127014160156, 0.22270252990722655, 0.22250291442871092, 0.22281216430664064, 0.22286746215820313, 0.22281011962890626, 0.22280703735351562, 0.22268620300292968, 0.22271487426757813, 0.22270976257324218, 0.22246092224121095, 0.222814208984375, 0.22279373168945313, 0.22285516357421875, 0.222635009765625, 0.2227783660888672, 0.2227271728515625, 0.22253773498535157, 0.22261862182617187, 0.223025146484375, 0.22222540283203124, 0.22242611694335937, 0.22123930358886718, 0.22103450012207032, 0.2209105987548828, 0.22106008911132813, 0.22086451721191405, 0.2209740753173828, 0.22110208129882813, 0.2218260498046875, 0.2210365447998047, 0.2210713653564453, 0.22134988403320313, 0.22149017333984375, 0.22106419372558594, 0.22114309692382814, 0.22111225891113281, 0.2225971221923828, 0.22118400573730468, 0.22139187622070314, 0.22111538696289063, 0.22112153625488282, 0.22092594909667967, 0.22134783935546876, 0.221127685546875, 0.22192127990722657, 0.2215854034423828, 0.22127308654785155, 0.22121881103515625, 0.2214686737060547, 0.2213396453857422, 0.22123008728027344, 0.22115327453613282, 0.22119218444824218, 0.22103347778320312, 0.22114303588867187, 0.45854925537109376, 0.22104678344726564, 0.22133247375488282, 0.22091571044921876, 0.22102630615234375, 0.2210508728027344, 0.22108364868164063, 0.22100274658203126, 0.22103141784667968, 0.22081741333007812, 0.22105906677246093, 0.22115737915039063, 0.22105702209472655, 0.22100991821289062, 0.22107749938964844, 0.22084402465820313, 0.22094540405273438, 0.22115327453613282, 0.22110719299316406, 0.220980224609375, 0.22154444885253907, 0.22112460327148437, 0.2211031036376953, 0.22097817993164062, 0.2210498504638672, 0.22134375, 0.2211266632080078, 0.22089112854003906, 0.2209566650390625, 0.22103756713867187, 0.2209976348876953, 0.22094540405273438, 0.22101708984375, 0.2214256591796875, 0.22106623840332032, 0.22102528381347655, 0.2213017578125, 0.2209976348876953, 0.221233154296875, 0.22100274658203126, 0.22102220153808594, 0.22098329162597657, 0.2211584014892578, 0.2211092529296875, 0.22123417663574219, 0.22111744689941407, 0.2211235809326172, 0.22127001953125, 0.22116761779785157, 0.22121881103515625, 0.2211031036376953, 0.2210498504638672, 0.221085693359375, 0.22113587951660157, 0.22125978088378906, 0.2211041259765625, 0.221154296875, 0.22228172302246094, 0.22109901428222656, 0.22121267700195313, 0.22124339294433593, 0.2211584014892578, 0.2214686737060547, 0.457933837890625, 0.2211962890625, 0.22194586181640624, 0.22120755004882814, 0.22100889587402345, 0.2212843475341797, 0.22189056396484375, 0.2212884521484375, 0.2211727294921875, 0.22092083740234375, 0.22090956115722657, 0.2211768341064453, 0.2210160675048828, 0.22107545471191406, 0.22154035949707032, 0.22111436462402342, 
0.22106521606445312, 0.22105599975585938, 0.22096998596191406, 0.22127615356445313, 0.22098329162597657, 0.22110617065429689, 0.2209003448486328, 0.22102117919921874, 0.22100376892089843, 0.2210365447998047, 0.22115635681152343, 0.2209239044189453, 0.22113996887207032, 0.22109490966796874, 0.22142259216308594, 0.22121983337402343, 0.22105599975585938, 0.2209105987548828, 0.221159423828125, 0.22130482482910158, 0.22137651062011718, 0.22106521606445312, 0.22213119506835938, 0.22088088989257812, 0.2209105987548828, 0.2210150451660156, 0.22098739624023436, 0.22129458618164063, 0.22125363159179687, 0.22113894653320312, 0.22119833374023437, 0.2218956756591797, 0.22099250793457031, 0.22140518188476563, 0.22130892944335936, 0.22184857177734374, 0.22106930541992187, 0.22177484130859376, 0.22129254150390626, 0.22115635681152343, 0.22096485900878907, 0.22116864013671875, 0.22148915100097658, 0.22126797485351563, 0.221384765625, 0.2214911346435547, 0.22094540405273438, 0.45877044677734374, 0.2209187774658203, 0.22111744689941407, 0.22087065124511718, 0.22097509765625, 0.22129971313476562, 0.2210744323730469, 0.2212843475341797, 0.22100991821289062, 0.22094744873046876, 0.2208368682861328, 0.22104473876953126, 0.22097305297851563, 0.2210682830810547, 0.2209914855957031, 0.2208757781982422, 0.2209628143310547, 0.2210426940917969, 0.22097509765625, 0.22097203063964843, 0.22091468811035156, 0.22095872497558594, 0.22083993530273438, 0.22126591491699218, 0.2211420135498047, 0.22217318725585938, 0.22125978088378906, 0.22093618774414062, 0.2212351989746094, 0.22171852111816406, 0.22144717407226563, 0.22102323913574218, 0.221486083984375, 0.220980224609375, 0.2209935302734375, 0.2213939208984375, 0.22103858947753907, 0.22095462036132812, 0.22107647705078126, 0.22094744873046876, 0.22103858947753907, 0.22127206420898438, 0.22109286499023437, 0.2211461181640625, 0.22218240356445312, 0.22110719299316406, 0.2209812469482422, 0.22119833374023437, 0.22108876037597655, 0.22112870788574218, 0.22101913452148436, 0.22104678344726564, 0.2209669189453125, 0.22112870788574218, 0.2210682830810547, 0.22112051391601562, 0.22107034301757814, 0.22104473876953126, 0.22089421081542968, 0.221011962890625, 0.2210160675048828, 0.2210846710205078, 0.2209628143310547, 0.45941656494140626, 0.2209495086669922, 0.221739013671875, 0.2209863739013672, 0.22102117919921874, 0.221048828125, 0.22226022338867188, 0.22110617065429689, 0.2219622344970703, 0.22180557250976562, 0.22233804321289063, 0.2211041259765625, 0.22118911743164063, 0.22098329162597657, 0.2222192687988281, 0.22136012268066407, 0.22214041137695312, 0.2213079071044922, 0.22128947448730468, 0.22100787353515625, 0.221127685546875, 0.22152703857421874, 0.2220943298339844, 0.2211031036376953, 0.22113792419433595, 0.22100889587402345, 0.22107034301757814, 0.22086758422851563, 0.2210048065185547, 0.22096588134765624, 0.2210846710205078, 0.22087680053710937, 0.221154296875, 0.22088607788085937, 0.22132730102539064, 0.2210529327392578, 0.22137548828125, 0.22103450012207032, 0.22103450012207032, 0.22075392150878906, 0.22111949157714844, 0.2211420135498047, 0.2211461181640625, 0.2211041259765625, 0.22117990112304686, 0.2209812469482422, 0.22081843566894532, 0.22104371643066406, 0.22111949157714844, 0.22109286499023437, 0.22118502807617188, 0.22213119506835938, 0.22129049682617188, 0.22118092346191406, 0.22175027465820313, 0.2221670379638672, 0.22145330810546876, 0.221517822265625, 0.22109490966796874, 0.22186087036132812, 0.2214615020751953, 0.22115122985839844, 0.22144102478027344, 
0.46024600219726564, 0.2209976348876953, 0.22151475524902345, 0.22144717407226563, 0.2214246368408203, 0.22156083679199218, 0.2218076171875, 0.22113690185546875, 0.2215905303955078, 0.22111949157714844, 0.2214993896484375, 0.22210354614257813, 0.221412353515625, 0.22127104187011717, 0.22124339294433593, 0.22104165649414062, 0.22167552185058595, 0.22163148498535157, 0.2216048583984375, 0.22217523193359376, 0.22170419311523437, 0.22118092346191406, 0.22107034301757814, 0.22108876037597655, 0.22134988403320313, 0.22161715698242188, 0.22139698791503906, 0.22093209838867187, 0.22118707275390626, 0.22141542053222657, 0.22160383605957032, 0.2212833251953125, 0.22284288024902343, 0.2227220458984375, 0.22272000122070312, 0.22289511108398438, 0.22466764831542968, 0.2222950439453125, 0.22271078491210938, 0.22245887756347657, 0.22309580993652345, 0.2229698486328125, 0.22274867248535157, 0.22293504333496095, 0.22252748107910156, 0.22249267578125, 0.22271795654296875, 0.22294834899902344, 0.2227640380859375, 0.22101913452148436, 0.2211420135498047, 0.22163967895507813, 0.22111949157714844, 0.22116761779785157, 0.2211420135498047, 0.22103347778320312, 0.22134066772460936, 0.22165606689453124, 0.22108979797363282, 0.22110823059082033, 0.2213621826171875, 0.22120448303222656, 0.22122802734375, 0.46055218505859374, 0.22127308654785155, 0.22119218444824218, 0.22088088989257812, 0.22082456970214845, 0.22128128051757812, 0.22141439819335937, 0.22119935607910157, 0.22096383666992186, 0.22105804443359375, 0.22094137573242187, 0.2210590057373047, 0.22101094055175782, 0.22181581115722657, 0.22244248962402344, 0.22087271118164062, 0.22137753295898438, 0.22102732849121093, 0.2222520294189453, 0.22111129760742188, 0.22082662963867186, 0.22154853820800782, 0.22087986755371095, 0.2212034606933594, 0.22097920227050782, 0.22116659545898437, 0.22161100769042968, 0.22113075256347656, 0.2215413818359375, 0.22253465270996095, 0.22111949157714844, 0.2211102752685547, 0.2211788787841797, 0.22141952514648439, 0.22129766845703125, 0.22112562561035157, 0.22104063415527345, 0.22099046325683594, 0.22102835083007813, 0.22090444946289062, 0.22094438171386718, 0.2210181121826172, 0.22109907531738282, 0.22128121948242188, 0.22107647705078126, 0.22143795776367187, 0.2210846710205078, 0.22113792419433595, 0.22100274658203126, 0.22100889587402345, 0.2211266632080078, 0.22113587951660157, 0.2213580780029297, 0.2212833251953125, 0.221154296875, 0.22113587951660157, 0.2210672607421875, 0.22181581115722657, 0.22194586181640624, 0.22132325744628906, 0.22108572387695313, 0.22134576416015625, 0.22117170715332032]",tokens/s,4.448640633764008,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,l,l,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch 
report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/l/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3113-3f544b3956d609fd51983080;d82d252e-e9e1-4860-904e-1f1d23557356) Repository Not Found for url: https://huggingface.co/l/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: l is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Meta-Llama-3-70B,meta-llama/Meta-Llama-3-70B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights 
self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-32B,Qwen/Qwen1.5-32B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,4397.162496,24111.480832,0.0,23465.033728,21690.932224,s,10,26.23831787109375,2.623831787109375,0.003031033246657479,2.6228023681640624,2.627721069335937,2.628540100097656,2.629195324707031,"[2.629359130859375, 2.621765380859375, 2.6223037109375, 2.621092041015625, 2.623301025390625, 2.620621826171875, 2.62457861328125, 2.6205380859375, 2.627218994140625, 2.6275390625]",tokens/s,97.56723020801203,kWh,3.09448529779911e-05,1.6958929474258183e-05,0.00014797261837800056,0.00019587640083024984,tokens/kWh,1306946.619985398,MB,4397.162496,24111.480832,0.0,23465.033728,21890.217984,s,10,1555.758796875,155.5758796875,0.01362531366110068,155.577890625,155.5897375,155.594603125,155.598495625,"[155.55940625, 155.57425, 155.583734375, 155.59946875, 155.58153125, 155.58503125, 155.568875, 155.58865625, 155.564453125, 155.553390625]",tokens/s,0.4049470915835152,kWh,0.0018369476799004608,0.0010068097738273171,0.008895798588855384,0.011739556042583163,tokens/kWh,5366.4721026484,,s,629,1576.8986267089822,2.50699304723209,0.31148250553470674,2.46929931640625,2.47089541015625,2.471475439453125,5.08945892578125,"[2.468798583984375, 2.46915185546875, 2.46936669921875, 2.469295166015625, 2.469130126953125, 
2.468601806640625, 2.46879638671875, 2.46974560546875, 2.468950927734375, 2.468516845703125, 2.468890625, 2.469065673828125, 2.468599853515625, 2.46870947265625, 2.468912109375, 2.469274658203125, 2.468938720703125, 2.46896337890625, 2.4693310546875, 2.46970068359375, 2.469392333984375, 2.46949072265625, 2.47088330078125, 2.4697119140625, 2.469168212890625, 2.468552734375, 2.4692490234375, 2.4690791015625, 2.47006005859375, 2.46929931640625, 2.46951123046875, 2.46925, 2.469129150390625, 2.468915283203125, 2.46999560546875, 2.4695, 2.46917724609375, 2.4696279296875, 2.469708740234375, 2.469084228515625, 2.46938427734375, 2.468663330078125, 2.46932275390625, 2.468631591796875, 2.468556884765625, 2.46858447265625, 2.46909326171875, 2.46888232421875, 2.46854248046875, 2.469202880859375, 2.469517333984375, 2.468483154296875, 2.468864013671875, 2.46881494140625, 2.468998046875, 2.469455810546875, 2.469496826171875, 2.469419921875, 2.4694599609375, 2.469094482421875, 2.47101025390625, 2.468877197265625, 5.09138330078125, 2.468682861328125, 2.468284423828125, 2.471290771484375, 2.471075927734375, 2.47086279296875, 2.470519775390625, 2.471779296875, 2.4712724609375, 2.470220703125, 2.469166015625, 2.47142919921875, 2.469087158203125, 2.470717529296875, 2.469718994140625, 2.4686796875, 2.468135986328125, 2.469396484375, 2.47069482421875, 2.469185546875, 2.46943359375, 2.4694208984375, 2.469435302734375, 2.472340576171875, 2.46964404296875, 2.4692294921875, 2.4692080078125, 2.4687861328125, 2.4688701171875, 2.468831298828125, 2.468864013671875, 2.468359130859375, 2.468737060546875, 2.4681728515625, 2.46879345703125, 2.47047265625, 2.471103515625, 2.4709150390625, 2.469086181640625, 2.467948486328125, 2.46850244140625, 2.46847998046875, 2.468877197265625, 2.47016845703125, 2.472048583984375, 2.471004150390625, 2.46881689453125, 2.46902783203125, 2.469129150390625, 2.468443115234375, 2.468276123046875, 2.46843603515625, 2.468770751953125, 2.46885888671875, 2.468476806640625, 2.46837255859375, 2.46976318359375, 2.4682578125, 2.468187255859375, 2.467946533203125, 2.4700517578125, 2.468588623046875, 2.468370361328125, 5.09084375, 2.46865625, 2.4686162109375, 2.471500732421875, 2.47128173828125, 2.47047265625, 2.470948974609375, 2.46904638671875, 2.46915283203125, 2.468949951171875, 2.469103515625, 2.469283935546875, 2.46955615234375, 2.46936669921875, 2.46929931640625, 2.469792724609375, 2.46934521484375, 2.468339599609375, 2.469074951171875, 2.46917724609375, 2.46955322265625, 2.469060546875, 2.471446533203125, 2.471505859375, 2.471520263671875, 2.46911376953125, 2.470220703125, 2.46964111328125, 2.46999658203125, 2.469699462890625, 2.469473388671875, 2.4692724609375, 2.469899169921875, 2.469782470703125, 2.47012158203125, 2.469866455078125, 2.4691630859375, 2.46879345703125, 2.469590087890625, 2.47096533203125, 2.471996337890625, 2.46934326171875, 2.468957275390625, 2.468894775390625, 2.468242431640625, 2.470201416015625, 2.46946923828125, 2.469783447265625, 2.469652587890625, 2.469385009765625, 2.4691455078125, 2.469442626953125, 2.46881494140625, 2.470005859375, 2.469909423828125, 2.46930224609375, 2.469780517578125, 2.46929296875, 2.468728759765625, 2.46875146484375, 2.468708251953125, 2.469340087890625, 2.468431884765625, 5.08925537109375, 2.469267333984375, 2.4689326171875, 2.468461669921875, 2.470912841796875, 2.47052294921875, 2.47040625, 2.4696259765625, 2.470639404296875, 2.470507568359375, 2.469961669921875, 2.4698583984375, 2.470289306640625, 2.470327392578125, 2.470380615234375, 
2.470289306640625, 2.47063037109375, 2.4716962890625, 2.4711904296875, 2.469096435546875, 2.46898583984375, 2.469234619140625, 2.46925927734375, 2.469075927734375, 2.468787109375, 2.470892578125, 2.468845458984375, 2.470190185546875, 2.470021240234375, 2.46980908203125, 2.469390380859375, 2.469427001953125, 2.469474365234375, 2.46984912109375, 2.468821044921875, 2.46932470703125, 2.46972412109375, 2.470828125, 2.470804443359375, 2.47084228515625, 2.471381103515625, 2.471739501953125, 2.4696513671875, 2.469551025390625, 2.46862744140625, 2.468588623046875, 2.468927490234375, 2.469970947265625, 2.469509033203125, 2.468726806640625, 2.470922119140625, 2.47033642578125, 2.470152099609375, 2.47086181640625, 2.469655517578125, 2.469350341796875, 2.469897216796875, 2.4691435546875, 2.468494384765625, 2.469423095703125, 2.470116455078125, 2.470024169921875, 2.4697353515625, 5.09418994140625, 2.47079833984375, 2.471482421875, 2.470703125, 2.4698798828125, 2.469234619140625, 2.469583984375, 2.469041259765625, 2.469078125, 2.468781982421875, 2.469856201171875, 2.470139892578125, 2.4699013671875, 2.46999853515625, 2.469337158203125, 2.4686396484375, 2.46900732421875, 2.46865625, 2.4688505859375, 2.46949365234375, 2.468842529296875, 2.469308349609375, 2.469699462890625, 2.469474365234375, 2.46980810546875, 2.470485107421875, 2.470642578125, 2.470287353515625, 2.46951220703125, 2.46904931640625, 2.47022900390625, 2.470436767578125, 2.46926025390625, 2.469675048828125, 2.47014306640625, 2.46932275390625, 2.468830322265625, 2.469003173828125, 2.469866455078125, 2.46951123046875, 2.468826171875, 2.469062744140625, 2.469338134765625, 2.469421142578125, 2.468864013671875, 2.4689970703125, 2.469686279296875, 2.468821044921875, 2.468958251953125, 2.46921533203125, 2.4694580078125, 2.468634521484375, 2.46913232421875, 2.4693955078125, 2.469814208984375, 2.468588623046875, 2.469433349609375, 2.470032470703125, 2.470792236328125, 2.469285888671875, 2.469370849609375, 2.469603271484375, 2.46991357421875, 5.0895380859375, 2.472522705078125, 2.472004638671875, 2.469856201171875, 2.468981689453125, 2.469172119140625, 2.46925, 2.4692265625, 2.469295166015625, 2.46936376953125, 2.47034765625, 2.468842529296875, 2.470485107421875, 2.471478271484375, 2.469750732421875, 2.468664306640625, 2.471371826171875, 2.468883544921875, 2.471439453125, 2.469444580078125, 2.46917626953125, 2.469329833984375, 2.469687255859375, 2.46869091796875, 2.469650390625, 2.468767822265625, 2.46934619140625, 2.46850048828125, 2.46826806640625, 2.46904443359375, 2.4693974609375, 2.469318603515625, 2.47453076171875, 2.469267333984375, 2.470331298828125, 2.4707685546875, 2.469550048828125, 2.46943017578125, 2.469969970703125, 2.469962646484375, 2.46923486328125, 2.468137939453125, 2.468229248046875, 2.46898779296875, 2.468890625, 2.468874267578125, 2.469168212890625, 2.470299560546875, 2.47147119140625, 2.469370849609375, 2.469350341796875, 2.470095947265625, 2.469555419921875, 2.46888232421875, 2.4688681640625, 2.469992431640625, 2.468869140625, 2.469044189453125, 2.469062744140625, 2.469525390625, 2.468809814453125, 2.46928076171875, 2.468957275390625, 5.09382470703125, 2.469845947265625, 2.4695244140625, 2.469098388671875, 2.46944677734375, 2.469396484375, 2.470770751953125, 2.469032958984375, 2.469140380859375, 2.468855712890625, 2.469170166015625, 2.469623779296875, 2.469095458984375, 2.4704697265625, 2.47050244140625, 2.4702392578125, 2.469874755859375, 2.4695224609375, 2.469214111328125, 2.469747802734375, 2.46868896484375, 
2.469425048828125, 2.468915283203125, 2.4693955078125, 2.468820068359375, 2.46910986328125, 2.468548583984375, 2.4704716796875, 2.469411865234375, 2.4691220703125, 2.468193359375, 2.468788330078125, 2.4683017578125, 2.46884033203125, 2.470220703125, 2.4702197265625, 2.468990966796875, 2.468908935546875, 2.4688486328125, 2.4704501953125, 2.470517822265625, 2.470140869140625, 2.469308349609375, 2.470096923828125, 2.469972900390625, 2.4689111328125, 2.468103271484375, 2.468663330078125, 2.468252685546875, 2.468338623046875, 2.467991455078125, 2.468906982421875, 2.46822802734375, 2.468630615234375, 2.469042236328125, 2.469347412109375, 2.47062939453125, 2.471343017578125, 2.467967041015625, 2.468644775390625, 2.46843701171875, 2.4686162109375, 2.473323486328125, 5.0889111328125, 2.468672607421875, 2.4683642578125, 2.468341796875, 2.468705322265625, 2.468413330078125, 2.46852197265625, 2.468662353515625, 2.468912109375, 2.46987158203125, 2.470073486328125, 2.46920068359375, 2.469920654296875, 2.46879638671875, 2.4687412109375, 2.4697333984375, 2.4695244140625, 2.4696298828125, 2.4695244140625, 2.470005859375, 2.468862060546875, 2.47003125, 2.4691630859375, 2.468483154296875, 2.4710625, 2.469022705078125, 2.46885888671875, 2.4692705078125, 2.469971923828125, 2.47102783203125, 2.47071435546875, 2.4704482421875, 2.471227294921875, 2.470994873046875, 2.469182373046875, 2.46873095703125, 2.47003125, 2.468622314453125, 2.4721171875, 2.468535400390625, 2.47090673828125, 2.470928466796875, 2.470781982421875, 2.470194091796875, 2.470340576171875, 2.468509765625, 2.468907958984375, 2.469888916015625, 2.469969970703125, 2.47013671875, 2.472005859375, 2.47183740234375, 2.47176708984375, 2.471166015625, 2.471228515625, 2.4702783203125, 2.47012451171875, 2.468314208984375, 2.46890087890625, 2.46814208984375, 2.46915283203125, 2.467857421875, 2.46899609375, 5.0967275390625, 2.468865966796875, 2.47231494140625, 2.4689765625, 2.469561279296875, 2.469706787109375, 2.468801513671875, 2.468005859375, 2.46970068359375, 2.469357666015625, 2.470153076171875, 2.47166455078125, 2.4716962890625, 2.47134716796875, 2.4716728515625, 2.469540771484375, 2.46961865234375, 2.46875439453125, 2.468744140625, 2.469203857421875, 2.46841748046875, 2.469667724609375, 2.4690810546875, 2.46848828125, 2.468600830078125, 2.469570556640625, 2.46897265625, 2.468644775390625, 2.46887109375, 2.46993310546875, 2.469518310546875, 2.468893798828125, 2.469458740234375, 2.4689755859375, 2.469214111328125, 2.468822998046875, 2.468765625, 2.469544921875, 2.46893359375, 2.46794140625, 2.46793115234375, 2.468474853515625, 2.468509765625, 2.469032958984375, 2.468798583984375, 2.46907177734375, 2.469205078125, 2.46849853515625, 2.468729736328125, 2.469506103515625, 2.4690390625, 2.4686376953125, 2.4710419921875, 2.469719970703125, 2.4685322265625, 2.468116455078125, 2.468727783203125, 2.4698369140625, 2.46875244140625, 2.46940869140625, 2.469123046875, 2.47010205078125, 2.468192138671875, 5.09570947265625, 2.469843994140625, 2.469245849609375, 2.46862939453125, 2.469123046875, 2.470595703125, 2.46889990234375, 2.468484130859375, 2.468884521484375, 2.469498779296875, 2.46938720703125, 2.468440185546875, 2.4687841796875, 2.46934326171875, 2.468601806640625, 2.469178466796875, 2.46820654296875, 2.469718017578125, 2.469128173828125, 2.468865966796875, 2.468513671875, 2.46963525390625, 2.468724609375, 2.4697353515625, 2.468211669921875, 2.468957275390625, 2.4694794921875, 2.46911083984375, 2.4683447265625, 2.4698837890625, 2.468577392578125, 
2.468810791015625, 2.468404296875, 2.46858251953125, 2.469440673828125, 2.469866455078125, 2.468959228515625, 2.4695625, 2.46875732421875, 2.46845849609375, 2.469051513671875, 2.46925830078125, 2.470119384765625, 2.468737060546875, 2.468697998046875, 2.469822509765625, 2.469697509765625, 2.4686376953125, 2.4682802734375, 2.468843505859375, 2.471318603515625, 2.4700693359375, 2.469341064453125, 2.469017578125, 2.469211181640625, 2.469179443359375, 2.468263916015625, 2.46847998046875, 2.4693955078125, 2.468968505859375, 2.469316650390625, 2.46883740234375, 2.46984814453125]",tokens/s,0.39888423348603846,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-72B,Qwen/Qwen2-beta-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more 
times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,s,s,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/s/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2c49-09d1ade27cf4465e6d659ed5;0ae41f6a-189d-4c8a-a921-1dcd9f869cfb) Repository Not Found for url: https://huggingface.co/s/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: s is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,B,B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) 
File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/B/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3319-0ec310710a67fbaf27b77d84;82bffa17-8794-4712-84a0-248690555fba) Repository Not Found for url: https://huggingface.co/B/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: B is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-14B,Qwen/Qwen2-beta-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,3956.158464,12732.33408,0.0,12085.886976,11337.370624,s,10,10.987366088867189,1.098736608886719,0.001768964710489349,1.0985178833007812,1.1007381103515623,1.101008154296875,1.101224189453125,"[1.1001180419921874, 1.1012781982421875, 1.0972359619140626, 1.0976529541015625, 1.097395263671875, 1.09569873046875, 1.0975172119140626, 1.0993828125, 1.1004088134765626, 1.1006781005859374]",tokens/s,232.99487605076595,kWh,1.295269936323166e-05,7.0975890679255816e-06,6.178085498018682e-05,8.183114341134406e-05,tokens/kWh,3128393.2904756567,MB,3956.158464,12732.33408,0.0,12085.886976,11686.804992,s,10,644.0755078125,64.40755078125,0.02043927244380867,64.406017578125,64.42852109374999,64.437791796875,64.445208359375,"[64.41949609375, 64.4215078125, 64.3984453125, 64.3965, 64.3855703125, 64.37932421875, 64.38755078125, 64.41358984375, 64.4470625, 
64.4264609375]",tokens/s,0.9781461837288221,kWh,0.0007606077267395126,0.000416880381825722,0.003642305219397435,0.00481979332796267,tokens/kWh,13071.09988191758,,s,629,652.9415617065439,1.038062896194822,0.13045675837303516,1.0222479248046874,1.02322421875,1.0235658081054688,2.119142734375,"[1.0216539916992187, 1.0224589233398438, 1.0223789672851562, 1.0224598999023438, 1.0228684692382812, 1.0230978393554688, 1.0232852783203126, 1.0228684692382812, 1.0223707885742188, 1.0226544799804687, 1.0221055908203125, 1.022455810546875, 1.0223565063476563, 1.0220953369140624, 1.02287255859375, 1.0226544799804687, 1.0230180053710938, 1.0228029174804687, 1.022360595703125, 1.0219008178710938, 1.02221826171875, 1.0227445678710938, 1.0227865600585937, 1.0227056884765624, 1.0226657104492187, 1.0222479248046874, 1.0227005615234375, 1.0229114990234376, 1.0229288940429688, 1.0229381103515625, 1.0223861694335938, 1.0226493530273437, 1.0219724731445312, 1.02327294921875, 1.0225264892578125, 1.021897705078125, 1.0218536987304687, 1.0230333251953125, 1.0221629638671874, 1.022266357421875, 1.0232105102539062, 1.0222919921875, 1.0218916015625, 1.0232176513671876, 1.0224496459960937, 1.0222622680664062, 1.0223595581054687, 1.0233876342773438, 1.0225960693359375, 1.0226697998046874, 1.0229954833984376, 1.0221025390625, 1.0217092895507813, 1.0235074462890625, 1.0223370361328126, 1.0225868530273436, 1.0227589111328126, 1.0223380737304688, 1.0219468994140626, 1.02196533203125, 1.0226288452148438, 1.0231378173828125, 2.124905517578125, 1.0222356567382813, 1.021960205078125, 1.022509033203125, 1.0222704467773438, 1.0222008056640626, 1.0229309692382813, 1.0228899536132812, 1.0222418212890625, 1.022761962890625, 1.0235012817382811, 1.0224527587890626, 1.0228264770507813, 1.0230067138671874, 1.022603271484375, 1.022150634765625, 1.0228582153320311, 1.0227486572265625, 1.0225458984375, 1.023267822265625, 1.0228500366210938, 1.0230599975585937, 1.02280908203125, 1.0225940551757813, 1.0225387573242188, 1.0220328979492188, 1.0222684326171876, 1.0227660522460937, 1.0227394409179686, 1.0236610717773438, 1.0233159790039061, 1.02287158203125, 1.0244444580078125, 1.0236958618164063, 1.022750732421875, 1.0225233764648438, 1.0231818237304688, 1.0220595092773437, 1.0220052490234375, 1.0232534790039063, 1.0219489135742188, 1.02171337890625, 1.0222592163085937, 1.0224414672851563, 1.023140869140625, 1.0223493041992187, 1.0228746337890624, 1.0218157958984375, 1.02228173828125, 1.02193359375, 1.0220175170898438, 1.0220697631835938, 1.02232470703125, 1.0216427612304688, 1.0218946533203126, 1.0215690307617187, 1.022439453125, 1.02179736328125, 1.0224251098632813, 1.0220114135742187, 1.0223197021484376, 1.0227127685546875, 1.0225541381835936, 2.119232421875, 1.0225018920898437, 1.022055419921875, 1.0218680419921875, 1.0223175659179689, 1.0221229858398437, 1.0219366455078125, 1.0225018920898437, 1.0220114135742187, 1.0218792724609376, 1.0220257568359374, 1.0222643432617187, 1.0220001220703125, 1.0221516723632813, 1.0221260986328125, 1.0216611938476563, 1.0218567504882812, 1.0222837524414063, 1.022792724609375, 1.0223380737304688, 1.0225889282226563, 1.0225274658203125, 1.0219089965820312, 1.0221567993164062, 1.0223544311523438, 1.0222837524414063, 1.0220943603515624, 1.022055419921875, 1.0220431518554687, 1.0217420654296876, 1.022118896484375, 1.0222120971679687, 1.02186083984375, 1.0227660522460937, 1.0224302368164062, 1.0219202270507812, 1.0222950439453125, 1.022434326171875, 1.0217748413085936, 1.0216837158203125, 1.021929443359375, 
1.0220933227539062, 1.0220226440429687, 1.0222172241210938, 1.021971435546875, 1.0217420654296876, 1.0220123901367189, 1.0219243774414062, 1.0223441772460937, 1.022044189453125, 1.022455810546875, 1.0223093872070312, 1.0240828857421875, 1.0233630981445312, 1.0228223876953124, 1.0220809936523438, 1.022814208984375, 1.0219632568359376, 1.0219100341796874, 1.021834228515625, 1.0227333374023437, 1.0218475341796875, 1.0219889526367187, 2.119702392578125, 1.0218157958984375, 1.0222847900390626, 1.0225991821289062, 1.0219386596679687, 1.0222909545898438, 1.0225694580078124, 1.02200732421875, 1.0223073120117188, 1.0221957397460937, 1.0222427978515625, 1.0221383666992188, 1.0221312255859376, 1.022075927734375, 1.0216263427734376, 1.0212208862304688, 1.02229296875, 1.0224076538085938, 1.023636474609375, 1.0234900512695313, 1.023056884765625, 1.0234654541015624, 1.0236641235351562, 1.02160791015625, 1.0215352172851562, 1.0216673583984375, 1.021822998046875, 1.0217000732421875, 1.022213134765625, 1.02185986328125, 1.0224988403320312, 1.022687255859375, 1.0223042602539063, 1.0217113647460938, 1.0225919799804688, 1.022482421875, 1.022055419921875, 1.0219806518554688, 1.023088623046875, 1.0223472900390624, 1.021929443359375, 1.0223493041992187, 1.0222120971679687, 1.0214901733398438, 1.0219417724609374, 1.0221721801757813, 1.0221773071289062, 1.0221752319335937, 1.0220165405273438, 1.0218884887695312, 1.0218833618164063, 1.0219745483398437, 1.0215157470703125, 1.0211686401367188, 1.0218997802734375, 1.0223994750976562, 1.0220114135742187, 1.022298095703125, 1.02169189453125, 1.0218690795898437, 1.021822998046875, 1.0221168823242188, 1.0219120483398438, 2.118912109375, 1.0223964233398437, 1.0216151123046875, 1.022171142578125, 1.0217677001953125, 1.0219642944335938, 1.0219970703125, 1.0222827758789061, 1.0216611938476563, 1.021549560546875, 1.0215291137695313, 1.0223114013671875, 1.022255126953125, 1.0225100708007813, 1.0217799682617188, 1.02200830078125, 1.0224179077148436, 1.0221260986328125, 1.0218782958984376, 1.0218884887695312, 1.0225499877929687, 1.022076904296875, 1.0219366455078125, 1.0222387084960938, 1.0217011108398437, 1.0217328491210937, 1.02257666015625, 1.0215403442382813, 1.0214144287109375, 1.0214072265625, 1.0221701049804688, 1.0217277221679688, 1.0222387084960938, 1.0218997802734375, 1.0216539916992187, 1.0216837158203125, 1.0219857788085938, 1.0216980590820313, 1.0217205810546874, 1.0214840087890624, 1.02214453125, 1.0218076171875, 1.0222633056640624, 1.0221834106445313, 1.0223790283203125, 1.0221107177734374, 1.022223388671875, 1.0214297485351562, 1.0217717895507812, 1.0216028442382812, 1.0226585693359376, 1.0215946044921875, 1.0224056396484376, 1.0217778930664063, 1.0220472412109376, 1.0222387084960938, 1.0221731567382812, 1.0224568481445313, 1.0223892211914063, 1.021928466796875, 1.0224486694335937, 1.0220421142578124, 1.022455810546875, 2.117295166015625, 1.0219315185546876, 1.0214788818359375, 1.0215782470703125, 1.021528076171875, 1.0218690795898437, 1.0219069213867187, 1.0218506469726563, 1.0224097290039063, 1.0219745483398437, 1.0218424072265626, 1.021686767578125, 1.0215823364257812, 1.0215782470703125, 1.021676513671875, 1.0219089965820312, 1.02179736328125, 1.0221465454101561, 1.0219561157226562, 1.021676513671875, 1.0220635986328126, 1.0227415161132813, 1.0216591186523438, 1.0214799194335937, 1.021432861328125, 1.0216959838867188, 1.021422607421875, 1.0225029296875, 1.0220267333984374, 1.021981689453125, 1.022392333984375, 1.0219192504882812, 1.021675537109375, 
1.0215321655273437, 1.0215844116210937, 1.0223329467773437, 1.0216427612304688, 1.0223380737304688, 1.0223411254882813, 1.0224465942382812, 1.0220513305664063, 1.021770751953125, 1.0217267456054688, 1.0211819458007811, 1.0219478759765626, 1.0219786376953126, 1.0217195434570312, 1.02196533203125, 1.0216980590820313, 1.0216949462890625, 1.0220328979492188, 1.02213427734375, 1.0221680908203126, 1.02171337890625, 1.0219458618164063, 1.021823974609375, 1.022129150390625, 1.0220892333984375, 1.0217769165039063, 1.022181396484375, 1.0221486206054688, 1.0219581298828124, 1.0223165283203124, 2.1193359375, 1.021507568359375, 1.0222807006835937, 1.0218096923828126, 1.0217789306640626, 1.0224578857421875, 1.0224793701171875, 1.0222202758789063, 1.0224015502929686, 1.0220513305664063, 1.0227291870117188, 1.0218936157226564, 1.0227210083007812, 1.0215659790039062, 1.021507568359375, 1.0218588256835937, 1.0215946044921875, 1.0212608032226562, 1.0221383666992188, 1.021644775390625, 1.0220400390625, 1.0219151611328126, 1.0219458618164063, 1.0218168334960938, 1.0220155029296876, 1.0220830688476563, 1.0217257080078126, 1.0215249633789063, 1.0215782470703125, 1.0214522705078124, 1.021971435546875, 1.0216908569335938, 1.0222796630859374, 1.0217891845703124, 1.0220093383789062, 1.0221598510742187, 1.0217533569335937, 1.021591552734375, 1.021834228515625, 1.0217297973632813, 1.0220697631835938, 1.0221373291015625, 1.021681640625, 1.021823974609375, 1.022286865234375, 1.0229125366210938, 1.0224752807617188, 1.0218772583007814, 1.0224363403320313, 1.02299853515625, 1.021749267578125, 1.022983154296875, 1.0218731689453124, 1.0215403442382813, 1.022688232421875, 1.0222745361328125, 1.0221066284179687, 1.021812744140625, 1.0228449096679688, 1.0219735107421875, 1.0219089965820312, 1.0222427978515625, 1.0222807006835937, 2.121678955078125, 1.0220349731445313, 1.0220318603515626, 1.022308349609375, 1.0220912475585937, 1.0223062744140625, 1.0220667114257813, 1.0221168823242188, 1.0219243774414062, 1.0236375122070311, 1.0226729125976564, 1.022350341796875, 1.0221209716796875, 1.0218065795898437, 1.0220042114257812, 1.02285107421875, 1.0233190307617188, 1.0220626220703124, 1.0221178588867188, 1.0219304809570313, 1.0225458984375, 1.0225377197265626, 1.0221055908203125, 1.0222172241210938, 1.0222172241210938, 1.0232381591796875, 1.0236713256835936, 1.02211376953125, 1.0223145141601562, 1.0224046020507813, 1.0224127807617187, 1.0221404418945312, 1.0220615844726562, 1.0223544311523438, 1.0224671020507812, 1.0220564575195312, 1.02242919921875, 1.0219437866210936, 1.0222633056640624, 1.0225182495117187, 1.0220728149414062, 1.0216908569335938, 1.0225111083984375, 1.0221690673828125, 1.0233764038085937, 1.0224005126953124, 1.0228797607421876, 1.022582763671875, 1.0221362915039063, 1.0220574951171875, 1.0226176147460937, 1.023177734375, 1.022688232421875, 1.0220369873046875, 1.0229730224609375, 1.0224833984375, 1.0232719116210938, 1.0233231201171875, 1.0232688598632813, 1.0227711791992187, 1.02242919921875, 1.0218782958984376, 1.0227630004882813, 2.123450439453125, 1.0225910034179688, 1.022224365234375, 1.0221588745117187, 1.0227947387695313, 1.0223759155273437, 1.0220062866210937, 1.021928466796875, 1.0225244140625, 1.0222254028320312, 1.0220328979492188, 1.0240491943359376, 1.0243778076171874, 1.0222274780273437, 1.0225735473632813, 1.02270361328125, 1.0226463012695313, 1.0230241088867187, 1.023151123046875, 1.0230661010742188, 1.022940185546875, 1.0235166625976562, 1.0226575317382813, 1.0224097290039063, 1.02308349609375, 
1.0247874755859374, 1.0235719604492188, 1.0233917236328125, 1.023362060546875, 1.0236497802734374, 1.0237828979492187, 1.0238555908203124, 1.024322509765625, 1.0232586059570312, 1.0231644287109376, 1.023604736328125, 1.0232422485351562, 1.0228469848632813, 1.0233712768554688, 1.0233978881835937, 1.0237593383789063, 1.02187109375, 1.0220492553710938, 1.0218803100585938, 1.023657958984375, 1.0220001220703125, 1.0220830688476563, 1.0218035278320312, 1.02250390625, 1.02404296875, 1.0239928588867186, 1.0232227783203125, 1.0231644287109376, 1.0232197265625, 1.0240625, 1.0235238647460938, 1.022983154296875, 1.0219345703125, 1.0223042602539063, 1.022192626953125, 1.0235565795898438, 1.0230742797851562, 1.0222684326171876, 2.122977294921875, 1.021991943359375, 1.0222418212890625, 1.022287841796875, 1.0231869506835938, 1.0230241088867187, 1.022814208984375, 1.022866455078125, 1.0224425048828125, 1.0229033203125, 1.0226390991210939, 1.0222633056640624, 1.0229155883789063, 1.0221915893554687, 1.0231552124023438, 1.0233661499023436, 1.0227711791992187, 1.0222151489257814, 1.0229217529296875, 1.0231818237304688, 1.0223114013671875, 1.0220543823242187, 1.02232373046875, 1.0222356567382813, 1.0230077514648437, 1.0234613647460937, 1.0228264770507813, 1.0229319458007813, 1.024480224609375, 1.0225131225585937, 1.0220369873046875, 1.0220902099609375, 1.0221383666992188, 1.0218895263671874, 1.0225633544921875, 1.0228480224609375, 1.0221834106445313, 1.0225643310546875, 1.0235330810546874, 1.02322998046875, 1.0229186401367187, 1.0226974487304688, 1.0228060302734374, 1.022993408203125, 1.023494140625, 1.0230015869140625, 1.0222899169921875, 1.0222520141601563, 1.0227855224609375, 1.0226903076171876, 1.022087158203125, 1.0217963256835938, 1.0218424072265626, 1.0221260986328125, 1.0221178588867188, 1.0227445678710938, 1.023115234375, 1.0224527587890626, 1.0226483154296875, 1.022645263671875, 1.022792724609375, 1.0225899658203126, 1.0227425537109376]",tokens/s,0.9633327649660273,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,mistralai/Mistral-7B-v0.1,mistralai/Mistral-7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1859.03104,6201.802752,0.0,5555.355648,5273.986048,s,10,6.183414916992188,0.6183414916992188,0.0011184543236210387,0.618503173828125,0.6196270629882813,0.6198319030761719,0.6199957751464844,"[0.6192075805664062, 0.6172159423828125, 0.6163760986328125, 0.6177174072265625, 0.6180655517578125, 0.6173275146484375, 0.6189407958984375, 0.6189457397460938, 0.61958154296875, 0.6200367431640625]",tokens/s,414.010709998621,kWh,7.2943227158652426e-06,3.997002324692457e-06,3.5828705133529186e-05,4.7120030174086885e-05,tokens/kWh,5432933.70259309,MB,1859.03104,6201.802752,0.0,5555.355648,5324.908032,s,10,366.93543359375,36.693543359375,0.025425135768351863,36.68483984375,36.71502578125,36.739856640625,36.759721328125,"[36.6825078125, 36.6820625, 36.67597265625, 36.67723828125, 36.69059375, 36.7646875, 36.68741015625, 36.687171875, 36.7095078125, 
36.67828125]",tokens/s,1.7169233121745884,kWh,0.0004334749158389039,0.00023758188798711938,0.002030957448294073,0.002702014252120096,tokens/kWh,23315.939192610833,,s,629,371.9142907714842,0.5912786816716763,0.07347096675901024,0.5822586669921875,0.5836932861328125,0.5840418579101562,1.2002971240234375,"[0.5831044921875, 0.5822699584960938, 0.5834168090820312, 0.58437939453125, 0.5840988159179688, 0.583689208984375, 0.5825567016601563, 0.5822218017578125, 0.5818173217773438, 0.5815992431640625, 0.5817159423828125, 0.5822648315429687, 0.5821982421875, 0.5821337890625, 0.5815695190429687, 0.5815643920898438, 0.582739990234375, 0.5819013061523437, 0.5815090942382812, 0.5818992919921875, 0.5820938110351562, 0.5823037719726563, 0.5815859375, 0.5816412353515625, 0.5814845581054687, 0.5814404907226562, 0.5816504516601563, 0.5815562133789063, 0.5829365844726563, 0.5825740966796875, 0.5817200927734375, 0.5818470458984375, 0.5818685302734375, 0.5846865844726562, 0.581960693359375, 0.5823938598632813, 0.582456298828125, 0.5820784912109375, 0.581928955078125, 0.5816002807617188, 0.5820436401367187, 0.5819269409179687, 0.581781494140625, 0.5828024291992188, 0.582518798828125, 0.583046142578125, 0.58231396484375, 0.58227197265625, 0.5819197387695313, 0.58180712890625, 0.5825986328125, 0.5820835571289062, 0.5818644409179687, 0.5822269287109375, 0.582245361328125, 0.5821522216796875, 0.5817988891601562, 0.5820927734375, 0.5820047607421875, 0.5820805053710938, 0.582076416015625, 0.5821747436523438, 1.2006451416015624, 0.5828607788085938, 0.5824737548828125, 0.5826815795898438, 0.5828382568359375, 0.582724609375, 0.5826375732421875, 0.5826047973632813, 0.5828505859375, 0.5823723754882812, 0.582371337890625, 0.5822054443359375, 0.5829539794921875, 0.5826570434570313, 0.5828075561523437, 0.582297607421875, 0.5822361450195312, 0.5817088012695313, 0.5831546630859376, 0.5821470947265625, 0.58178564453125, 0.5819739990234375, 0.5819053955078125, 0.581644287109375, 0.5816033325195312, 0.58183984375, 0.5819576416015625, 0.5817825317382812, 0.5819330444335937, 0.5823958740234375, 0.5825853271484375, 0.5823047485351562, 0.5824429931640625, 0.5819678955078125, 0.5820170288085937, 0.58241845703125, 0.5822371826171875, 0.5820948486328125, 0.581960693359375, 0.5820579833984375, 0.5817855834960938, 0.5820006103515625, 0.5823180541992188, 0.5817886962890625, 0.5818910522460937, 0.5815715942382812, 0.5831065673828125, 0.5818674926757812, 0.5818327026367187, 0.5820958862304687, 0.5820795288085937, 0.5822504272460938, 0.5827584228515625, 0.58241845703125, 0.5819842529296875, 0.5818224487304687, 0.5830155029296875, 0.5818920288085937, 0.58241943359375, 0.5820692749023437, 0.5821624145507812, 0.58189208984375, 0.582134765625, 1.199605712890625, 0.5822044067382812, 0.58277685546875, 0.5820149536132813, 0.5822853393554688, 0.5823651733398437, 0.5825372314453126, 0.5818859252929688, 0.58189111328125, 0.5824112548828125, 0.5814589233398437, 0.5815828247070313, 0.5821522216796875, 0.5817999267578124, 0.5821306762695313, 0.5816801147460937, 0.5815941162109375, 0.5823600463867188, 0.5821112060546875, 0.5825054931640625, 0.5818009643554688, 0.5820743408203125, 0.5816873168945312, 0.58237646484375, 0.5829601440429687, 0.5822699584960938, 0.5826181030273437, 0.582033447265625, 0.5821142578125, 0.5820088500976562, 0.581875732421875, 0.5822617797851563, 0.581992431640625, 0.5820211181640625, 0.5821327514648438, 0.5825167236328125, 0.583541748046875, 0.5822965698242187, 0.5826570434570313, 0.5821788330078125, 0.5822116088867187, 
0.5822228393554687, 0.582134765625, 0.5822730102539062, 0.5822904052734375, 0.5818532104492188, 0.581939208984375, 0.58212353515625, 0.5819115600585938, 0.581917724609375, 0.581796875, 0.5814108276367187, 0.5820999755859375, 0.581823486328125, 0.5824778442382812, 0.5824727172851563, 0.58199658203125, 0.5818797607421875, 0.582181884765625, 0.5825372314453126, 0.5822669067382813, 0.582498291015625, 0.582295654296875, 1.1999998779296874, 0.5822935180664063, 0.5821593627929688, 0.5819166870117187, 0.58216552734375, 0.5825269775390625, 0.5819852905273437, 0.5826549682617187, 0.58295703125, 0.5822156982421876, 0.5825126342773438, 0.5818338012695312, 0.5822903442382813, 0.5818214111328125, 0.5828648681640625, 0.5822689208984375, 0.5817630615234375, 0.5818245239257812, 0.5817979736328125, 0.582257568359375, 0.5819883422851563, 0.5817487182617187, 0.5817006225585938, 0.5819115600585938, 0.5828761596679688, 0.5822597045898438, 0.5822843017578125, 0.5820897216796875, 0.5826027221679687, 0.581818359375, 0.5817947998046875, 0.5823662109375, 0.5819771118164062, 0.5821470947265625, 0.581907470703125, 0.5817671508789063, 0.5819443359375, 0.5819330444335937, 0.5819166870117187, 0.5820835571289062, 0.5821214599609374, 0.5826129760742188, 0.5820989379882813, 0.5827706909179687, 0.5828003540039063, 0.5829396362304687, 0.5819617309570313, 0.5818541870117188, 0.5825341186523437, 0.5824102172851563, 0.582561767578125, 0.582096923828125, 0.58191259765625, 0.581517333984375, 0.5817364501953125, 0.5814691772460937, 0.5818265380859375, 0.5818490600585937, 0.5822433471679688, 0.5819934692382812, 0.5829918823242187, 0.5824635009765625, 0.5829898071289062, 1.201005615234375, 0.5828812866210937, 0.5822740478515624, 0.58203955078125, 0.58253515625, 0.58185009765625, 0.58208154296875, 0.5819259033203125, 0.58195458984375, 0.5821777954101562, 0.5819033813476563, 0.5821747436523438, 0.5821358032226562, 0.5823917846679687, 0.58222900390625, 0.5823723754882812, 0.5822586669921875, 0.5819432983398437, 0.5823989868164062, 0.582287353515625, 0.5819432983398437, 0.5823098754882813, 0.5821470947265625, 0.58201904296875, 0.5821522216796875, 0.5817927856445313, 0.58224951171875, 0.5818756713867187, 0.5823150024414062, 0.5823600463867188, 0.5821900634765625, 0.5823775024414063, 0.5833564453125, 0.5822730102539062, 0.583024658203125, 0.5828505859375, 0.5830850830078125, 0.5825567016601563, 0.5824440307617188, 0.58309326171875, 0.5827573852539063, 0.58252392578125, 0.5825177612304687, 0.5825382690429688, 0.5826395874023438, 0.582086669921875, 0.5830287475585938, 0.5831290893554687, 0.5821757202148438, 0.5823723754882812, 0.5831546630859376, 0.5824204711914063, 0.5822750854492188, 0.5826437377929687, 0.582055908203125, 0.5820119018554688, 0.5823989868164062, 0.5829959716796875, 0.5821931762695313, 0.5819218139648438, 0.5824594116210937, 0.5828392944335937, 0.5823467407226562, 1.2004127197265626, 0.5823897705078125, 0.5821522216796875, 0.5828423461914063, 0.58172314453125, 0.5815429077148437, 0.5822146606445312, 0.5822965698242187, 0.5824727172851563, 0.5832130737304687, 0.5837701416015625, 0.5834608764648438, 0.58389404296875, 0.583257080078125, 0.5831526489257812, 0.5838325805664063, 0.5842800903320312, 0.5840598754882812, 0.5840148315429687, 0.5834495849609375, 0.5838704833984375, 0.5837393798828125, 0.58374658203125, 0.5832683715820313, 0.5839451904296875, 0.5835653076171875, 0.583952392578125, 0.5841285400390624, 0.5839216918945312, 0.584069091796875, 0.5833717651367187, 0.584079345703125, 0.5822791748046875, 0.58410595703125, 
0.5837250366210938, 0.5839124755859375, 0.5839564819335937, 0.583910400390625, 0.5855027465820313, 0.5835325317382812, 0.5842186279296875, 0.5839196166992188, 0.5835909423828125, 0.5837138061523437, 0.5835827026367187, 0.5830758666992187, 0.5837998046875, 0.5836973876953125, 0.5838069458007813, 0.5841008911132812, 0.5842042846679687, 0.5843200073242187, 0.584690673828125, 0.5837639770507812, 0.5840875244140625, 0.58410595703125, 0.5839616088867188, 0.5835899047851563, 0.5849579467773437, 0.5842135009765625, 0.5833236694335937, 0.5823897705078125, 0.581928955078125, 1.2009288330078125, 0.582350830078125, 0.58395751953125, 0.5835161743164062, 0.5829293823242188, 0.5832315063476563, 0.5834116821289063, 0.5836349487304687, 0.5841828002929688, 0.5820057373046875, 0.5825576782226562, 0.5837035522460937, 0.5828607788085938, 0.5823723754882812, 0.5838561401367187, 0.58364111328125, 0.5828628540039062, 0.5813483276367187, 0.581317626953125, 0.5815889892578125, 0.58169140625, 0.5815162963867188, 0.58185009765625, 0.582002685546875, 0.5819638061523438, 0.5832847290039063, 0.5816729736328125, 0.5818470458984375, 0.5819771118164062, 0.5817507934570313, 0.5814476928710938, 0.5819402465820313, 0.5820078125, 0.581517333984375, 0.581465087890625, 0.5822945556640625, 0.5821337890625, 0.5815838623046875, 0.5820989990234375, 0.5824296875, 0.5820457153320312, 0.5824461059570313, 0.5824737548828125, 0.5818050537109375, 0.582128662109375, 0.5824798583984375, 0.5821214599609374, 0.5822095336914063, 0.5823784790039063, 0.582134765625, 0.5822146606445312, 0.5818245239257812, 0.5819381713867188, 0.5824696044921875, 0.5824706420898438, 0.5826283569335937, 0.58235595703125, 0.5820753784179687, 0.5823877563476563, 0.582424560546875, 0.58216552734375, 0.5818204345703125, 0.5826416625976563, 1.201427490234375, 0.5822935180664063, 0.582170654296875, 0.5820518188476562, 0.5819218139648438, 0.5819739990234375, 0.5826621704101562, 0.5824409790039062, 0.5830502319335937, 0.582667236328125, 0.5825014038085937, 0.5823467407226562, 0.5828045043945312, 0.5823252563476562, 0.5821880493164062, 0.5829171142578125, 0.582118408203125, 0.5825382690429688, 0.5823733520507812, 0.5822125854492187, 0.5824266357421874, 0.5821091918945313, 0.5821173706054688, 0.5825208129882813, 0.5826007080078125, 0.5825556640625, 0.5820579833984375, 0.5825986328125, 0.582476806640625, 0.5827225341796874, 0.5821777954101562, 0.5822300415039062, 0.58294580078125, 0.5820938720703125, 0.5819207153320313, 0.5821747436523438, 0.582129638671875, 0.5825044555664063, 0.5818736572265625, 0.5816575927734375, 0.5821951904296875, 0.5822125854492187, 0.5821552734375, 0.5820139770507813, 0.5825054931640625, 0.5818880004882813, 0.5819913940429687, 0.5824952392578125, 0.5821358032226562, 0.5826898193359376, 0.582197265625, 0.5821798095703125, 0.582339599609375, 0.5823529052734375, 0.5823723754882812, 0.5820682373046875, 0.582150146484375, 0.5822054443359375, 0.5825587158203125, 0.5829376220703125, 0.5822269287109375, 0.5832161254882813, 0.5821911010742188, 1.2029603271484375, 0.5822730102539062, 0.5827010498046875, 0.5821941528320312, 0.5827747802734375, 0.5820374755859375, 0.582319091796875, 0.5819852905273437, 0.5823017578125, 0.5824746704101562, 0.5826314086914063, 0.582255615234375, 0.5823743896484375, 0.5821563110351563, 0.5829775390625, 0.5822258911132813, 0.5821051025390624, 0.582297607421875, 0.5837189331054687, 0.5839871826171875, 0.5838991088867187, 0.5836922607421875, 0.584005615234375, 0.5837537231445312, 0.583568359375, 0.5831055297851563, 0.5834229736328125, 
0.5836431274414062, 0.5840794067382813, 0.5837383422851562, 0.58393701171875, 0.5841243896484375, 0.5823876953125, 0.5833635864257812, 0.58427392578125, 0.5820712890625, 0.5819453735351563, 0.5823057861328125, 0.582002685546875, 0.5819166870117187, 0.5829539794921875, 0.5820671997070312, 0.5822361450195312, 0.5820221557617188, 0.5820999755859375, 0.58199755859375, 0.5821737060546875, 0.583208984375, 0.5831434326171875, 0.5823364868164063, 0.5827174682617188, 0.5824962768554688, 0.5822781372070313, 0.5826682739257812, 0.5827501831054688, 0.5818245239257812, 0.5817589721679688, 0.5818521728515625, 0.58195556640625, 0.58195458984375, 0.582002685546875, 0.5821552734375, 0.5824307250976563, 1.2025272216796874, 0.5821696166992187, 0.5821286010742187, 0.582107177734375, 0.5820220947265625, 0.5823426513671875, 0.5817620239257812, 0.5823989868164062, 0.5820825805664063, 0.581781494140625, 0.5816698608398437, 0.581696533203125, 0.5820774536132812, 0.5819094848632812, 0.581712890625, 0.5817139282226562, 0.5817886962890625, 0.581992431640625, 0.5820774536132812, 0.5824501953125, 0.5818746948242187, 0.5817753295898438, 0.5824921875, 0.5816432495117188, 0.582413330078125, 0.58231396484375, 0.5824603881835938, 0.5824594116210937, 0.5828782348632813, 0.5824542846679688, 0.582302734375, 0.5820313720703125, 0.58241943359375, 0.5823294067382813, 0.5827665405273438, 0.5818951416015625, 0.5827501831054688, 0.5819842529296875, 0.5818511352539063, 0.5822074584960938, 0.5821696166992187, 0.5824000244140625, 0.582150146484375, 0.582055908203125, 0.5819166870117187, 0.5821051025390624, 0.58183984375, 0.581707763671875, 0.5821583251953125, 0.5820416259765625, 0.581970947265625, 0.5821788330078125, 0.582245361328125, 0.58267138671875, 0.5819248657226562, 0.58211328125, 0.5825842895507812, 0.5821829223632813, 0.5829417114257812, 0.582345703125, 0.5822811889648437, 0.5821439819335937, 0.5820999755859375]",tokens/s,1.6912498809745307,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-16B-nl,Salesforce/codegen-16B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File 
""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: CodeGenForCausalLM does not support Flash Attention 2.0 yet. Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmp549h1fq_/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-2.7b,EleutherAI/pythia-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2221.408256,3330.801664,0.0,2684.35456,2447.595008,s,10,2.311121505737305,0.2311121505737305,0.0008602880707090342,0.23093637084960938,0.23197196044921875,0.23249637908935547,0.23291591400146486,"[0.2330207977294922, 0.23185542297363282, 0.23051023864746092, 0.23036329650878906, 0.2304237823486328, 0.2300004119873047, 0.23067584228515625, 0.2311968994140625, 0.231619384765625, 0.23145542907714844]",tokens/s,1107.6873256749418,kWh,2.719861554979074e-06,1.4903567604051204e-06,1.2427491002590687e-05,1.6637709317974884e-05,tokens/kWh,15386733.54050159,MB,2222.546944,3330.801664,0.0,2684.35456,2597.68064,s,10,135.98273144531248,13.598273144531248,0.005626597165862265,13.59927783203125,13.6021646484375,13.60570927734375,13.60854498046875,"[13.60925390625, 13.5985986328125, 13.5982265625, 13.6012080078125, 13.59995703125, 13.5903544921875, 13.5950361328125, 13.600318359375, 13.601376953125, 13.5884013671875]",tokens/s,4.63294120734267,kWh,0.00016051297066488652,8.797306729544289e-05,0.0007245357930120089,0.0009730218309723382,tokens/kWh,64746.74873126358,,s,629,137.8431466979981,0.21914649713513207,0.027476947243743017,0.21579168701171875,0.2161471496582031,0.2163871795654297,0.4466235400390625,"[0.2167510986328125, 0.21612850952148438, 0.2157936706542969, 0.21595852661132814, 0.21579776000976564, 0.21578341674804688, 0.21598515319824219, 0.21577626037597655, 0.21600563049316407, 0.21667738342285156, 0.2159656982421875, 0.21651968383789064, 0.21600767517089844, 0.21581004333496093, 0.21593600463867188, 0.21574656677246093, 
0.2157589111328125, 0.21606707763671876, 0.2157782440185547, 0.21572096252441406, 0.2157322235107422, 0.21579058837890625, 0.21588172912597656, 0.21599232482910155, 0.2161080322265625, 0.21578341674804688, 0.21588172912597656, 0.21578341674804688, 0.21624217224121095, 0.21602406311035155, 0.2159482879638672, 0.21581619262695312, 0.21592169189453125, 0.21585302734375, 0.2157742004394531, 0.21580697631835938, 0.2158233642578125, 0.21585305786132813, 0.216163330078125, 0.2158858184814453, 0.21590425109863282, 0.2159718475341797, 0.2159482879638672, 0.21587660217285157, 0.2160394287109375, 0.21591244506835938, 0.2157660217285156, 0.21589913940429686, 0.21592576599121094, 0.21606399536132812, 0.21596365356445313, 0.2158428192138672, 0.21607833862304687, 0.21651866149902343, 0.21638246154785157, 0.21635379028320312, 0.21680844116210937, 0.21638656616210938, 0.21633331298828126, 0.21572709655761718, 0.21570457458496095, 0.21581517028808594, 0.448078857421875, 0.21560012817382812, 0.21530323791503905, 0.21556626892089845, 0.21550796508789063, 0.2154915771484375, 0.21565951538085937, 0.21555711364746094, 0.21552742004394532, 0.21577728271484375, 0.2157127685546875, 0.21635789489746093, 0.21575782775878907, 0.21568818664550782, 0.2154915771484375, 0.2156810302734375, 0.21565440368652344, 0.21555404663085936, 0.21562060546875, 0.21577728271484375, 0.2157557830810547, 0.21564006042480469, 0.21555815124511718, 0.21571583557128907, 0.2157373504638672, 0.21600973510742189, 0.21567079162597655, 0.2159482879638672, 0.21565235900878907, 0.2156820526123047, 0.21579469299316406, 0.21556941223144532, 0.215583740234375, 0.21569842529296876, 0.21576191711425782, 0.21580294799804686, 0.21571781921386718, 0.215583740234375, 0.21599334716796875, 0.2168739776611328, 0.21618482971191405, 0.21625753784179688, 0.21591142272949218, 0.21590835571289063, 0.21615206909179688, 0.21587557983398437, 0.21748121643066406, 0.21573324584960937, 0.21585816955566406, 0.21574758911132813, 0.21579161071777345, 0.21602613830566406, 0.21589602661132812, 0.21573837280273436, 0.21600767517089844, 0.2161459197998047, 0.216195068359375, 0.21681561279296874, 0.21595545959472656, 0.2159800262451172, 0.21614183044433594, 0.21579263305664062, 0.21579168701171875, 0.44681414794921875, 0.21564927673339843, 0.21552024841308592, 0.2156093444824219, 0.21581517028808594, 0.21569024658203126, 0.21543324279785156, 0.2153850555419922, 0.21555917358398438, 0.21604454040527343, 0.2156441650390625, 0.21566259765625, 0.2157373504638672, 0.21574348449707031, 0.2156165771484375, 0.2157229461669922, 0.21567692565917967, 0.21569024658203126, 0.2158540802001953, 0.21572198486328126, 0.2158008270263672, 0.21594009399414063, 0.21594009399414063, 0.2173450164794922, 0.21601280212402343, 0.21582643127441406, 0.21562265014648438, 0.216121337890625, 0.21586329650878905, 0.21575065612792968, 0.21581721496582032, 0.21577626037597655, 0.21595135498046875, 0.21587046813964844, 0.2158223419189453, 0.21579263305664062, 0.21578854370117187, 0.21617765808105469, 0.21579571533203126, 0.2159615936279297, 0.2160906219482422, 0.2161459197998047, 0.21604864501953125, 0.2158551025390625, 0.2156943359375, 0.21593498229980468, 0.2158254089355469, 0.21669273376464843, 0.21583258056640625, 0.21568307495117187, 0.21580288696289063, 0.21581210327148437, 0.21588070678710938, 0.21590118408203124, 0.2158018493652344, 0.2157803497314453, 0.2158192596435547, 0.2158192596435547, 0.2157670440673828, 0.21574758911132813, 0.21588890075683595, 0.2157137908935547, 0.21567079162597655, 
0.44661248779296875, 0.215657470703125, 0.21567999267578125, 0.21604351806640626, 0.2158858184814453, 0.21600665283203124, 0.21593600463867188, 0.2156145324707031, 0.21560723876953125, 0.21591346740722656, 0.21581619262695312, 0.21606809997558593, 0.216342529296875, 0.21595750427246094, 0.21586636352539063, 0.21600154113769532, 0.2158540802001953, 0.21593907165527343, 0.21594522094726562, 0.21608345031738282, 0.21599334716796875, 0.21589401245117187, 0.21608038330078125, 0.21600665283203124, 0.21591448974609376, 0.21582131958007814, 0.21565542602539062, 0.2157004852294922, 0.21572921752929688, 0.21575570678710937, 0.215878662109375, 0.21577113342285156, 0.2156390380859375, 0.21572709655761718, 0.21576191711425782, 0.215878662109375, 0.21596774291992188, 0.21590835571289063, 0.21587660217285157, 0.21585305786132813, 0.2158356475830078, 0.21571685791015624, 0.21579263305664062, 0.21578956604003907, 0.21685247802734375, 0.2158223419189453, 0.21567692565917967, 0.21570661926269533, 0.21589503479003908, 0.21621554565429688, 0.2161643524169922, 0.21638758850097656, 0.215878662109375, 0.21575885009765625, 0.21584588623046874, 0.21589401245117187, 0.2158745574951172, 0.21597900390625, 0.21583769226074218, 0.21566361999511718, 0.2158182373046875, 0.21573017883300782, 0.21579776000976564, 0.44662783813476564, 0.21568511962890624, 0.2158233642578125, 0.21589605712890625, 0.2156615753173828, 0.21600154113769532, 0.21570355224609375, 0.2157639617919922, 0.21581312561035157, 0.21569024658203126, 0.215552001953125, 0.21569638061523438, 0.21593087768554686, 0.2157076416015625, 0.21581004333496093, 0.21580902099609375, 0.21563699340820314, 0.21653094482421875, 0.21587660217285157, 0.21565440368652344, 0.21584588623046874, 0.2159288330078125, 0.21573426818847657, 0.21571583557128907, 0.21572096252441406, 0.21561036682128906, 0.21595750427246094, 0.21581210327148437, 0.215841796875, 0.21570559692382812, 0.21621452331542967, 0.2162554931640625, 0.216015869140625, 0.21625958251953126, 0.21600767517089844, 0.2159052734375, 0.2159800262451172, 0.21584077453613282, 0.21585714721679689, 0.2158612518310547, 0.21581517028808594, 0.2157496337890625, 0.21570252990722658, 0.21573837280273436, 0.21577215576171874, 0.21601689147949218, 0.21586534118652342, 0.21566053771972657, 0.21617971801757813, 0.215804931640625, 0.2159964141845703, 0.215910400390625, 0.21567692565917967, 0.21574143981933594, 0.215993408203125, 0.21583967590332032, 0.21577626037597655, 0.21723341369628907, 0.2157936706542969, 0.21567181396484375, 0.21573017883300782, 0.2158602294921875, 0.21591346740722656, 0.44625918579101564, 0.2154598388671875, 0.21547109985351562, 0.215942138671875, 0.2155018310546875, 0.2154967041015625, 0.21560525512695314, 0.21534719848632813, 0.2155335693359375, 0.21555609130859374, 0.21548646545410155, 0.21554893493652344, 0.21556838989257812, 0.2154239959716797, 0.21581210327148437, 0.21597080993652343, 0.2159831085205078, 0.21576191711425782, 0.21572813415527345, 0.21552543640136718, 0.21562054443359374, 0.21571994018554688, 0.21557862854003906, 0.21559500122070313, 0.21570867919921874, 0.21560115051269532, 0.21548851013183593, 0.21564210510253906, 0.21561856079101563, 0.21550079345703124, 0.2157137908935547, 0.21574348449707031, 0.21571481323242186, 0.21626162719726563, 0.21590016174316407, 0.2157127685546875, 0.21570867919921874, 0.21562982177734374, 0.21705215454101562, 0.21569229125976563, 0.21560012817382812, 0.2157373504638672, 0.21579058837890625, 0.21566464233398439, 0.21566361999511718, 0.21576191711425782, 
0.21585101318359376, 0.21570867919921874, 0.21573529052734375, 0.21557554626464845, 0.2158305206298828, 0.2157742004394531, 0.2156083221435547, 0.2155704345703125, 0.21572813415527345, 0.21567082214355468, 0.21573321533203124, 0.2156513214111328, 0.21562777709960937, 0.21566265869140624, 0.21600965881347656, 0.21582028198242187, 0.21637837219238282, 0.4470773620605469, 0.2156072998046875, 0.21567079162597655, 0.21566566467285156, 0.21550079345703124, 0.21559091186523438, 0.21565542602539062, 0.21559500122070313, 0.2156697540283203, 0.21570661926269533, 0.21575474548339843, 0.21560426330566407, 0.2156011199951172, 0.21565235900878907, 0.21573939514160156, 0.2161090545654297, 0.2160199737548828, 0.21577523803710938, 0.21591552734375, 0.21555815124511718, 0.2156441650390625, 0.21569842529296876, 0.21553868103027343, 0.21665382385253906, 0.21579571533203126, 0.21555711364746094, 0.2155888671875, 0.21576499938964844, 0.215762939453125, 0.21582028198242187, 0.2158582458496094, 0.2157608337402344, 0.21596368408203126, 0.21594313049316408, 0.2157445068359375, 0.2157639617919922, 0.21581414794921874, 0.21576502990722657, 0.21569635009765625, 0.215762939453125, 0.2158223419189453, 0.21592268371582032, 0.21613157653808593, 0.21586329650878905, 0.21606501770019532, 0.21634661865234375, 0.21581517028808594, 0.21568716430664062, 0.215804931640625, 0.21572096252441406, 0.215689208984375, 0.21578239440917968, 0.21577523803710938, 0.21575167846679688, 0.21579776000976564, 0.21582028198242187, 0.21593600463867188, 0.2157178955078125, 0.21569024658203126, 0.21573324584960937, 0.21574758911132813, 0.21596876525878905, 0.21589605712890625, 0.44737637329101565, 0.21560012817382812, 0.2154035186767578, 0.21556224060058593, 0.21559091186523438, 0.21557760620117186, 0.2155878448486328, 0.2156195831298828, 0.21551922607421875, 0.21568511962890624, 0.21572813415527345, 0.21568818664550782, 0.21565542602539062, 0.21562265014648438, 0.21565440368652344, 0.21580799865722655, 0.21636402893066406, 0.2157178955078125, 0.2158018493652344, 0.21578341674804688, 0.21576499938964844, 0.21569740295410156, 0.21566464233398439, 0.21570970153808594, 0.2157936706542969, 0.21613772583007812, 0.21595237731933595, 0.21639680480957033, 0.21639474487304688, 0.21601791381835939, 0.2160025634765625, 0.21584999084472656, 0.2159288330078125, 0.21633024597167969, 0.21584793090820312, 0.2159964141845703, 0.21607936096191407, 0.2160148468017578, 0.21588992309570312, 0.21595852661132814, 0.21601689147949218, 0.21620632934570314, 0.21618380737304688, 0.216595458984375, 0.21603021240234374, 0.21584690856933594, 0.21581619262695312, 0.21571994018554688, 0.21570252990722658, 0.21567692565917967, 0.21569638061523438, 0.21571072387695311, 0.2158970947265625, 0.2156810302734375, 0.2158981170654297, 0.2158602294921875, 0.21611932373046874, 0.21608546447753907, 0.21588479614257813, 0.21581004333496093, 0.21603225708007812, 0.21623910522460937, 0.21606809997558593, 0.44815972900390627, 0.21594316101074218, 0.21585305786132813, 0.21599130249023438, 0.21577932739257813, 0.21586534118652342, 0.21586431884765625, 0.21613055419921876, 0.21584895324707032, 0.21639578247070312, 0.21571994018554688, 0.215657470703125, 0.21565951538085937, 0.2156748809814453, 0.21572607421875, 0.21584690856933594, 0.21561138916015626, 0.21668966674804688, 0.2158008270263672, 0.21577113342285156, 0.21580902099609375, 0.2158673858642578, 0.21579673767089844, 0.21592063903808595, 0.21624319458007812, 0.21564927673339843, 0.21612953186035155, 0.21587046813964844, 0.2156810302734375, 
0.21577830505371093, 0.2156943359375, 0.2155827178955078, 0.21565542602539062, 0.21575372314453126, 0.21602610778808592, 0.2159052734375, 0.2158039093017578, 0.21560421752929687, 0.21574656677246093, 0.2161326141357422, 0.21603021240234374, 0.21574041748046874, 0.21587251281738282, 0.21601791381835939, 0.21597900390625, 0.2160343017578125, 0.21582028198242187, 0.21569126892089843, 0.2157424621582031, 0.21565235900878907, 0.21565440368652344, 0.2158602294921875, 0.21615206909179688, 0.21636915588378905, 0.2160148468017578, 0.21692825317382813, 0.21595960998535157, 0.21593696594238282, 0.21585101318359376, 0.21585101318359376, 0.21587251281738282, 0.2156134338378906, 0.21572813415527345, 0.44772250366210936, 0.2154598388671875, 0.21554896545410157, 0.21563491821289063, 0.21536154174804686, 0.2154977264404297, 0.21561651611328125, 0.2155335693359375, 0.21565338134765624, 0.21574867248535157, 0.21558367919921875, 0.2155714874267578, 0.21574960327148437, 0.2156390380859375, 0.21553868103027343, 0.21560838317871095, 0.21563385009765626, 0.2156810302734375, 0.2155704345703125, 0.2155704345703125, 0.21559603881835937, 0.21553663635253906, 0.21553152465820313, 0.21560525512695314, 0.21560421752929687, 0.21559397888183593, 0.21667225646972657, 0.21568818664550782, 0.21559910583496095, 0.21570457458496095, 0.2156513214111328, 0.21557554626464845, 0.21559193420410155, 0.21564210510253906, 0.21565338134765624, 0.21612953186035155, 0.2163251190185547, 0.21550592041015626, 0.215910400390625, 0.21579469299316406, 0.2157455291748047, 0.215762939453125, 0.21571891784667968, 0.21559295654296876, 0.21559091186523438, 0.2155847625732422, 0.21548442077636717, 0.21573426818847657, 0.21578341674804688, 0.21575270080566405, 0.21571583557128907, 0.2156134338378906, 0.2155878448486328, 0.21560012817382812, 0.21608242797851562, 0.2159646759033203, 0.2156615753173828, 0.21582438659667968, 0.21574348449707031, 0.215689208984375, 0.21561856079101563, 0.21556121826171876, 0.2156380157470703]",tokens/s,4.563157582132701,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,a,a,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: 
https://huggingface.co/a/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a30bc-044d66e16a5f88ab369ebb7b;42c1c814-ed81-4534-a411-77135dba3049) Repository Not Found for url: https://huggingface.co/a/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: a is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-2.7B,,cuda,0,42,,,,,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,main,False,False,MB,2234.441728,2932.342784,0.0,2285.89568,2082.575872,s,10,2.433214202880859,0.24332142028808593,0.0008617435568583196,0.24325077056884764,0.24459763488769531,0.24462198333740234,0.24464146209716797,"[0.24380015563964844, 0.24204917907714843, 0.24258213806152343, 0.24336614990234376, 0.24313539123535155, 0.24226588439941407, 0.24288755798339845, 0.24388919067382814, 0.24464633178710937, 0.24459222412109374]",tokens/s,1052.1063032465574,kWh,2.856464352872637e-06,1.5652168180297718e-06,1.2995936322662033e-05,1.7417617493564442e-05,tokens/kWh,14697762.199370166,MB,2234.441728,2959.60576,0.0,2313.158656,2180.684288,s,10,139.734884765625,13.9734884765625,0.01146198033243705,13.9710908203125,13.98807626953125,13.992370166015625,13.995805283203126,"[13.9871220703125, 13.9770771484375, 13.9966640625, 13.980609375, 13.95928515625, 13.96146875, 13.9670087890625, 13.9751728515625, 13.965376953125, 
13.965099609375]",tokens/s,4.508537728833344,kWh,0.00016483188493384256,9.03412226583766e-05,0.0007461166431891384,0.0010012897507813577,tokens/kWh,62918.85036358144,,s,629,141.7022607421874,0.22528181358058424,0.02898003334479797,0.22168063354492187,0.2224162872314453,0.22265180053710937,0.4647246643066406,"[0.22235647583007812, 0.22139903259277344, 0.22141644287109374, 0.22147891235351563, 0.2213693389892578, 0.22170726013183595, 0.22163967895507813, 0.2214256591796875, 0.22199398803710937, 0.22159564208984375, 0.22243431091308594, 0.22190284729003906, 0.22202674865722657, 0.2210723876953125, 0.22141133117675782, 0.22209945678710938, 0.22192127990722657, 0.2224189453125, 0.22194073486328125, 0.22234317016601562, 0.22204518127441406, 0.22228582763671875, 0.2221793212890625, 0.22186904907226562, 0.222603271484375, 0.22207487487792968, 0.22233599853515626, 0.22205238342285155, 0.22235133361816406, 0.2235494384765625, 0.22263296508789063, 0.22236671447753906, 0.22127410888671875, 0.2215219268798828, 0.2213519287109375, 0.22189056396484375, 0.22161100769042968, 0.22196940612792967, 0.22203187561035156, 0.22196531677246092, 0.22140007019042968, 0.2213396453857422, 0.2219325408935547, 0.2218577880859375, 0.22200933837890624, 0.22186495971679687, 0.22244557189941405, 0.22216499328613282, 0.22224179077148437, 0.221955078125, 0.2217943115234375, 0.2224179229736328, 0.22189776611328124, 0.2225540771484375, 0.22166732788085938, 0.22277632141113282, 0.22537420654296875, 0.22221209716796875, 0.22146047973632813, 0.22176870727539064, 0.22216294860839844, 0.22160383605957032, 0.46420379638671877, 0.2221997833251953, 0.22188236999511718, 0.22198272705078126, 0.2217697296142578, 0.22201139831542968, 0.2215034942626953, 0.22119526672363282, 0.22219879150390626, 0.22249369812011718, 0.22189773559570314, 0.22246092224121095, 0.22249574279785156, 0.22148197937011718, 0.22124134826660155, 0.221802490234375, 0.22128536987304687, 0.2213744659423828, 0.22234214782714845, 0.22216806030273437, 0.22141746520996095, 0.22162637329101562, 0.22157516479492187, 0.22105191040039063, 0.2215782470703125, 0.2218741760253906, 0.2216898498535156, 0.22149427795410156, 0.2215536651611328, 0.2222725067138672, 0.22197760009765624, 0.22207795715332032, 0.2216847381591797, 0.22156083679199218, 0.2215741424560547, 0.2220595245361328, 0.22160179138183594, 0.22255001831054688, 0.22156492614746093, 0.22203904724121093, 0.22160179138183594, 0.22140109252929688, 0.22128640747070313, 0.22133660888671874, 0.2213908233642578, 0.22154035949707032, 0.22508236694335937, 0.22183013916015626, 0.22202879333496095, 0.2222407684326172, 0.2216785888671875, 0.22169293212890626, 0.22160691833496093, 0.22218649291992187, 0.2214686737060547, 0.22141439819335937, 0.222487548828125, 0.2225991668701172, 0.22195301818847657, 0.22147276306152344, 0.22199909973144533, 0.22176666259765626, 0.22219879150390626, 0.4654428100585937, 0.2218987579345703, 0.22210765075683594, 0.2219008026123047, 0.22165504455566407, 0.22182707214355468, 0.22211891174316406, 0.22192640686035156, 0.221770751953125, 0.22130482482910158, 0.22165913391113282, 0.22189263916015625, 0.2217932434082031, 0.22167552185058595, 0.22172467041015625, 0.22150143432617186, 0.22293606567382812, 0.22162535095214844, 0.2228019256591797, 0.22176255798339845, 0.22248243713378907, 0.22172979736328124, 0.22229402160644532, 0.22310092163085937, 0.22198988342285156, 0.22210150146484375, 0.22238616943359374, 0.22228480529785155, 0.22175334167480468, 0.22205337524414062, 0.22236058044433593, 0.2222407684326172, 
0.2220369873046875, 0.2219929656982422, 0.2221854705810547, 0.2224127960205078, 0.22233395385742188, 0.22231858825683593, 0.22264627075195312, 0.22218751525878908, 0.22241587829589843, 0.22255718994140625, 0.2218741760253906, 0.22225920104980468, 0.22183935546875, 0.22192332458496095, 0.22207795715332032, 0.223752197265625, 0.22219673156738282, 0.22203289794921874, 0.22212197875976564, 0.2223206329345703, 0.22238616943359374, 0.22253575134277342, 0.22210047912597655, 0.22248442077636718, 0.2219356231689453, 0.2220912628173828, 0.22202572631835937, 0.22224896240234376, 0.22215168762207033, 0.22308761596679688, 0.22262168884277345, 0.46642996215820315, 0.22230323791503906, 0.22220390319824218, 0.22247935485839843, 0.22223155212402343, 0.22242303466796876, 0.22273023986816406, 0.2226411590576172, 0.22237493896484375, 0.22229808044433594, 0.2220185546875, 0.22259507751464844, 0.22249984741210938, 0.22244659423828125, 0.22167654418945312, 0.22273228454589844, 0.2222581787109375, 0.22126182556152343, 0.2230200653076172, 0.22316950988769532, 0.22224179077148437, 0.22140518188476563, 0.22184857177734374, 0.22160281372070312, 0.22178099060058593, 0.22235443115234374, 0.2213928985595703, 0.22145535278320314, 0.22126591491699218, 0.22138368225097657, 0.22140518188476563, 0.22138983154296876, 0.22134886169433593, 0.22174208068847656, 0.22144720458984374, 0.22132118225097655, 0.22146354675292967, 0.2214001007080078, 0.22144508361816406, 0.2214297637939453, 0.22163763427734376, 0.221517822265625, 0.22244044494628906, 0.22324838256835938, 0.22501274108886718, 0.22167861938476563, 0.22164067077636718, 0.2215004119873047, 0.22143283081054688, 0.22134579467773438, 0.22140518188476563, 0.22115122985839844, 0.22140415954589843, 0.2217195587158203, 0.22167449951171875, 0.2215034942626953, 0.22143795776367187, 0.2215854034423828, 0.22149530029296874, 0.22169497680664063, 0.22140931701660158, 0.22172157287597657, 0.22152806091308594, 0.4667914123535156, 0.22145228576660156, 0.2217512969970703, 0.2216468505859375, 0.22147789001464843, 0.221444091796875, 0.22145126342773438, 0.22144717407226563, 0.22157005310058595, 0.22128947448730468, 0.22147071838378907, 0.22156903076171874, 0.22139801025390626, 0.2215116729736328, 0.2211778564453125, 0.22171034240722656, 0.22124748229980468, 0.22265548706054689, 0.22149836730957032, 0.22151065063476563, 0.22136422729492186, 0.22120550537109376, 0.22162431335449218, 0.22184754943847657, 0.2219530487060547, 0.22183625793457032, 0.22151065063476563, 0.22187826538085936, 0.2214799346923828, 0.22132736206054687, 0.22136524963378906, 0.22137344360351563, 0.22129356384277343, 0.22121778869628905, 0.22129254150390626, 0.2211164093017578, 0.22158746337890625, 0.22140415954589843, 0.22128128051757812, 0.22147071838378907, 0.22140313720703125, 0.22121273803710936, 0.22131808471679687, 0.2216048583984375, 0.22186810302734375, 0.22218540954589844, 0.221623291015625, 0.22134477233886718, 0.22152088928222657, 0.22141746520996095, 0.22138368225097657, 0.22134169006347656, 0.22129458618164063, 0.22138983154296876, 0.22134169006347656, 0.2212956085205078, 0.221159423828125, 0.2240184326171875, 0.22193971252441405, 0.2216089630126953, 0.22141952514648439, 0.22154853820800782, 0.2216837158203125, 0.46434201049804685, 0.22140415954589843, 0.22150758361816406, 0.22161817932128905, 0.2220298309326172, 0.22168780517578124, 0.2222530517578125, 0.22144613647460937, 0.2217840576171875, 0.22155264282226564, 0.22125669860839844, 0.22169197082519532, 0.22176358032226562, 0.22181779479980468, 
0.22157926940917969, 0.2215782470703125, 0.2214246368408203, 0.22152294921875, 0.22142771911621092, 0.22154547119140625, 0.22132429504394532, 0.22135606384277343, 0.22152394104003906, 0.22135398864746095, 0.22151577758789062, 0.2220185546875, 0.22214349365234376, 0.22169088745117188, 0.2212464599609375, 0.22148403930664062, 0.221370361328125, 0.22125363159179687, 0.22161305236816406, 0.22148197937011718, 0.22166015625, 0.2215977020263672, 0.22134783935546876, 0.2212351989746094, 0.2241535949707031, 0.22163558959960938, 0.2216785888671875, 0.22160691833496093, 0.221549560546875, 0.22167141723632813, 0.22142054748535156, 0.2214297637939453, 0.22139903259277344, 0.2217943115234375, 0.22149119567871095, 0.221876220703125, 0.22163250732421874, 0.221765625, 0.2215004119873047, 0.2217400360107422, 0.22149427795410156, 0.2213939208984375, 0.2211758117675781, 0.22143487548828125, 0.22141644287109374, 0.22124339294433593, 0.22147071838378907, 0.22166630554199218, 0.22171136474609374, 0.46487347412109375, 0.22141133117675782, 0.22127206420898438, 0.221338623046875, 0.2219622344970703, 0.22154342651367187, 0.22202265930175782, 0.22145330810546876, 0.22258995056152345, 0.22142874145507813, 0.22155264282226564, 0.22175334167480468, 0.2215741424560547, 0.22162739562988282, 0.22164378356933595, 0.2218803253173828, 0.22163250732421874, 0.22160076904296874, 0.22158848571777343, 0.2215116729736328, 0.2215679931640625, 0.22120550537109376, 0.22134477233886718, 0.22316543579101564, 0.22167039489746093, 0.22168063354492187, 0.22168678283691406, 0.2215116729736328, 0.2215188751220703, 0.22155363464355468, 0.22205235290527345, 0.22159359741210938, 0.22261351013183595, 0.2219059143066406, 0.22219161987304686, 0.22189773559570314, 0.22165504455566407, 0.22153114318847655, 0.2218076171875, 0.22219570922851561, 0.22192445373535155, 0.22225091552734375, 0.22204415893554688, 0.22189260864257812, 0.22191104125976563, 0.2215352325439453, 0.22143385314941405, 0.22180557250976562, 0.22147584533691406, 0.22139698791503906, 0.22178713989257812, 0.22213325500488282, 0.22149221801757812, 0.22222848510742188, 0.22150860595703126, 0.2214686737060547, 0.22136627197265624, 0.22127001953125, 0.22115020751953124, 0.22117478942871094, 0.22150143432617186, 0.22140211486816405, 0.22120550537109376, 0.46591384887695314, 0.22151065063476563, 0.22132838439941407, 0.22122496032714845, 0.22145433044433593, 0.22153318786621093, 0.2213519287109375, 0.22116044616699218, 0.22144717407226563, 0.22135296630859375, 0.2212833251953125, 0.22198477172851563, 0.22165811157226561, 0.22226739501953124, 0.22169804382324218, 0.22206361389160156, 0.22276095581054686, 0.22171136474609374, 0.22175949096679687, 0.22171034240722656, 0.22183833312988283, 0.2217021484375, 0.22223257446289063, 0.221876220703125, 0.22166323852539063, 0.22185369873046876, 0.22207693481445312, 0.22190386962890624, 0.2217379913330078, 0.22185165405273438, 0.22234214782714845, 0.22208717346191406, 0.22299136352539062, 0.22185061645507811, 0.22382899475097656, 0.22139187622070314, 0.22159461975097655, 0.22185369873046876, 0.2214072265625, 0.22193971252441405, 0.22215577697753905, 0.22185267639160156, 0.22164480590820312, 0.22156594848632813, 0.2215188751220703, 0.22147990417480468, 0.22186189270019532, 0.22172262573242188, 0.22174310302734376, 0.22215577697753905, 0.22219366455078124, 0.2219448699951172, 0.2216007385253906, 0.22184959411621094, 0.22170930480957032, 0.22189164733886718, 0.2221639404296875, 0.22170518493652344, 0.22201344299316406, 0.2217830352783203, 
0.22169395446777343, 0.22200831604003907, 0.22163148498535157, 0.46692044067382815, 0.2217400360107422, 0.22165196228027345, 0.22197555541992187, 0.22193458557128906, 0.22203904724121093, 0.22154655456542968, 0.22138873291015626, 0.22145330810546876, 0.22146354675292967, 0.22145024108886718, 0.22175640869140625, 0.22180351257324218, 0.22217727661132813, 0.22153727722167968, 0.221896728515625, 0.2214164123535156, 0.2225797119140625, 0.22176051330566407, 0.22177381896972656, 0.22154649353027345, 0.221233154296875, 0.2214615020751953, 0.22134681701660155, 0.22129356384277343, 0.22127615356445313, 0.22128640747070313, 0.22148095703125, 0.22134169006347656, 0.22124339294433593, 0.22114816284179686, 0.2213191680908203, 0.2215188446044922, 0.22131814575195313, 0.22146969604492187, 0.22117990112304686, 0.22134375, 0.22164889526367187, 0.22181581115722657, 0.222171142578125, 0.22175640869140625, 0.22172979736328124, 0.22134066772460936, 0.22151986694335937, 0.22140313720703125, 0.22141030883789062, 0.22220594787597656, 0.22215887451171876, 0.22227349853515624, 0.22183116149902343, 0.22192640686035156, 0.22173184204101562, 0.2216898498535156, 0.22144717407226563, 0.22147584533691406, 0.22146354675292967, 0.22156903076171874, 0.22176051330566407, 0.22247833251953125, 0.221802490234375, 0.22252543640136718, 0.22219468688964844, 0.22166015625, 0.46707403564453126, 0.22175640869140625, 0.22192536926269532, 0.22187315368652344, 0.22208204650878907, 0.22162535095214844, 0.22142361450195314, 0.22128025817871094, 0.22170008850097656, 0.2215782470703125, 0.22144613647460937, 0.22178201293945313, 0.22167654418945312, 0.22178816223144532, 0.2213816375732422, 0.2216816711425781, 0.22158131408691406, 0.22198066711425782, 0.22218853759765625, 0.221770751953125, 0.22207693481445312, 0.2219069366455078, 0.22215782165527342, 0.2219069366455078, 0.2217830352783203, 0.2216417236328125, 0.2232033233642578, 0.22143283081054688, 0.22127719116210937, 0.22148300170898438, 0.22127104187011717, 0.2214615020751953, 0.22147584533691406, 0.22175538635253905, 0.22177690124511718, 0.2215290832519531, 0.22144717407226563, 0.22135296630859375, 0.22129356384277343, 0.22165196228027345, 0.22160076904296874, 0.22165402221679686, 0.22142771911621092, 0.22134783935546876, 0.22133555603027344, 0.22152088928222657, 0.22161509704589843, 0.2215034942626953, 0.22159257507324218, 0.22131712341308593, 0.22130586242675782, 0.22127410888671875, 0.22141850280761718, 0.22161305236816406, 0.22188954162597657, 0.22163456726074218, 0.2218014678955078, 0.2217902069091797, 0.22162124633789063, 0.22154444885253907, 0.22161305236816406, 0.2218956756591797, 0.22184857177734374]",tokens/s,4.438884720014453,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-40b,tiiuae/falcon-40b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = 
Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: FalconForCausalLM does not support Flash Attention 2.0 yet. Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpy5nhagtv/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,1,1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not 
Found for url: https://huggingface.co/1/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a34cb-2276d4f60a66e8de27738ec6;bdcaad9a-9b5a-493f-938d-a03fccb10775) Repository Not Found for url: https://huggingface.co/1/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 1 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-base-alpha-3b,stabilityai/stablelm-base-alpha-3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.1,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1386.098688,4467.458048,0.0,3821.010944,3588.539392,s,10,2.8072667541503904,0.28072667541503904,0.0013891453980565372,0.2802557830810547,0.2808915802001953,0.2828903091430664,0.2844892922973633,"[0.2848890380859375, 0.28023013305664063, 0.2801990051269531, 0.28025711059570313, 0.2802708740234375, 0.2801849365234375, 0.2804474182128906, 0.280294189453125, 0.28023959350585936, 0.28025445556640627]",tokens/s,911.9190387643711,kWh,3.3119122141305312e-06,1.8147647802379651e-06,1.6291780008722804e-05,2.1418457003091298e-05,tokens/kWh,11952308.234110976,MB,1386.098688,4467.458048,0.0,3821.010944,3698.945536,s,10,163.89264257812502,16.389264257812503,0.003489083519208589,16.3881943359375,16.392421484375,16.3953728515625,16.3977339453125,"[16.39832421875, 16.389771484375, 16.3869296875, 16.391765625, 16.388423828125, 16.38619921875, 16.38796484375, 16.390015625, 16.3857890625, 
16.387458984375]",tokens/s,3.8439797546109427,kWh,0.00019345068814485898,0.00010602696284692681,0.000949664353866678,0.0012491420048584639,tokens/kWh,50434.61812585377,,s,629,166.15247476196274,0.264153377999941,0.03329400953216876,0.260126708984375,0.26031758422851564,0.26039378051757817,0.540367294921875,"[0.2604298095703125, 0.26013287353515624, 0.26008575439453124, 0.26007962036132815, 0.26021786499023436, 0.2603202514648438, 0.2601820068359375, 0.26012875366210936, 0.2602219543457031, 0.26012368774414063, 0.2601778869628906, 0.26016357421875, 0.2602014770507812, 0.2604083251953125, 0.26030694580078123, 0.26017074584960936, 0.26030694580078123, 0.2600079345703125, 0.2600693664550781, 0.25997927856445313, 0.2600478820800781, 0.2602444763183594, 0.2602977294921875, 0.2602588195800781, 0.26022711181640623, 0.26007958984375, 0.26012161254882815, 0.26005709838867186, 0.26022503662109375, 0.26006732177734376, 0.2602147827148438, 0.2602352600097656, 0.2603049011230469, 0.26016973876953126, 0.260178955078125, 0.26042266845703127, 0.2604431457519531, 0.2602874755859375, 0.26027420043945315, 0.2602597961425781, 0.26024856567382815, 0.26021273803710937, 0.2602014770507812, 0.26022091674804687, 0.26042776489257813, 0.26031103515625, 0.2603397216796875, 0.2603745422363281, 0.2603683776855469, 0.2603018798828125, 0.26036627197265627, 0.2605783081054687, 0.2604216613769531, 0.26033352661132814, 0.260316162109375, 0.2602147827148438, 0.26032333374023436, 0.2602229614257813, 0.2604031982421875, 0.26052197265625, 0.26033868408203126, 0.260274169921875, 0.5405133056640625, 0.25998745727539063, 0.25996185302734376, 0.25998541259765623, 0.2601246643066406, 0.2599372863769531, 0.26018405151367185, 0.25999563598632813, 0.25993011474609373, 0.2600499267578125, 0.25996185302734376, 0.2602874755859375, 0.2600693664550781, 0.25998745727539063, 0.2600140686035156, 0.26008270263671873, 0.259999755859375, 0.26005197143554687, 0.2600048522949219, 0.2604021911621094, 0.2602219543457031, 0.26024551391601564, 0.26012161254882815, 0.2601471862792969, 0.26012057495117186, 0.260178955078125, 0.26022091674804687, 0.26046771240234373, 0.2601615295410156, 0.26024038696289065, 0.26021273803710937, 0.2602147827148438, 0.26016973876953126, 0.2602188720703125, 0.2601195373535156, 0.26058139038085937, 0.260274169921875, 0.2602301330566406, 0.26009088134765623, 0.26012875366210936, 0.2600478820800781, 0.260136962890625, 0.2601390075683594, 0.26003045654296875, 0.2600919189453125, 0.2601973876953125, 0.26009088134765623, 0.2600837097167969, 0.26009292602539064, 0.260305908203125, 0.26029464721679685, 0.26030899047851563, 0.2602496032714844, 0.26015640258789063, 0.26017791748046876, 0.2601666564941406, 0.26009088134765623, 0.2602219543457031, 0.26042266845703127, 0.26024346923828123, 0.2601379699707031, 0.2602014770507812, 0.2602086486816406, 0.5403678588867188, 0.2603028564453125, 0.2600447998046875, 0.2601502685546875, 0.2600048522949219, 0.26019326782226565, 0.2600621948242188, 0.2599915466308594, 0.2599700622558594, 0.2602352600097656, 0.2600621948242188, 0.26012774658203125, 0.2601891784667969, 0.26008984375, 0.2599700622558594, 0.26002740478515624, 0.25998849487304687, 0.26010009765625, 0.2601195373535156, 0.2601656188964844, 0.26014004516601563, 0.260073486328125, 0.26001715087890626, 0.2600939636230469, 0.2603970642089844, 0.26008779907226565, 0.26007040405273435, 0.26021786499023436, 0.2600970153808594, 0.2600345458984375, 0.2600058898925781, 0.2600550537109375, 0.2601164855957031, 0.260126708984375, 0.26001715087890626, 
0.2600202331542969, 0.26008779907226565, 0.2600335388183594, 0.2601031799316406, 0.26019021606445314, 0.26009292602539064, 0.26026190185546877, 0.26016973876953126, 0.26018405151367185, 0.260126708984375, 0.2599700622558594, 0.260210693359375, 0.26018405151367185, 0.2603049011230469, 0.26017279052734377, 0.2600960998535156, 0.26008053588867186, 0.2600447998046875, 0.26002328491210935, 0.2600110168457031, 0.2601195373535156, 0.2600560607910156, 0.2601257019042969, 0.2600007629394531, 0.26009292602539064, 0.2601082763671875, 0.26012057495117186, 0.26048614501953127, 0.5407078247070313, 0.26011749267578127, 0.26015640258789063, 0.260136962890625, 0.260284423828125, 0.26017074584960936, 0.26010015869140624, 0.26020858764648436, 0.2601666564941406, 0.26003662109375, 0.2600970153808594, 0.26017587280273435, 0.26000897216796875, 0.26008575439453124, 0.2603550720214844, 0.26025164794921873, 0.2600058898925781, 0.2601666564941406, 0.2600345458984375, 0.2600980529785156, 0.2600058898925781, 0.2600386657714844, 0.26025982666015623, 0.2601533508300781, 0.26014617919921873, 0.2602496032714844, 0.2600744934082031, 0.2601820068359375, 0.2601922607421875, 0.2602076110839844, 0.26013592529296875, 0.26038885498046876, 0.26029364013671874, 0.26032333374023436, 0.26011239624023436, 0.26018508911132815, 0.2602854309082031, 0.2602239990234375, 0.2601471862792969, 0.2601748352050781, 0.26018405151367185, 0.260453369140625, 0.2603192443847656, 0.2602230224609375, 0.26013180541992187, 0.260316162109375, 0.2601594848632813, 0.2601441345214844, 0.26007244873046875, 0.26016973876953126, 0.2601164855957031, 0.26031103515625, 0.26038681030273436, 0.2602608642578125, 0.260073486328125, 0.26023934936523435, 0.26009906005859373, 0.2601584777832031, 0.2600621948242188, 0.26027008056640627, 0.26026904296875, 0.26036532592773437, 0.26014004516601563, 0.5404119262695313, 0.25996902465820315, 0.25991064453125, 0.2599966735839844, 0.2600755310058594, 0.2599987182617188, 0.2600939636230469, 0.26010726928710937, 0.25997927856445313, 0.26001202392578127, 0.26005197143554687, 0.26035198974609375, 0.26012261962890626, 0.2601553955078125, 0.26011544799804687, 0.26016256713867186, 0.2600048522949219, 0.26005093383789063, 0.259989501953125, 0.26000384521484377, 0.260421630859375, 0.26011749267578127, 0.26003250122070315, 0.2600284118652344, 0.2600130615234375, 0.26020556640625, 0.26025982666015623, 0.2602270812988281, 0.2600611877441406, 0.2601308288574219, 0.2600919189453125, 0.26027008056640627, 0.26002532958984376, 0.2602301330566406, 0.2600560607910156, 0.26042266845703127, 0.26021786499023436, 0.26033355712890627, 0.2601820068359375, 0.2600202331542969, 0.26016461181640627, 0.26014105224609374, 0.2602024841308594, 0.2601257019042969, 0.26028338623046876, 0.2601830749511719, 0.26015127563476564, 0.2601236572265625, 0.2601257019042969, 0.26051788330078124, 0.26029876708984373, 0.26014004516601563, 0.2600867919921875, 0.26011444091796876, 0.2600611877441406, 0.26010726928710937, 0.26002227783203125, 0.26012261962890626, 0.2603407287597656, 0.2602506103515625, 0.26011239624023436, 0.26010726928710937, 0.26004376220703124, 0.540248046875, 0.2601922607421875, 0.2599966735839844, 0.2599495544433594, 0.25990451049804686, 0.26010531616210936, 0.2599730224609375, 0.25991168212890625, 0.2599587707519531, 0.2602270812988281, 0.2600191955566406, 0.2600007629394531, 0.26014617919921873, 0.2600867919921875, 0.25993011474609373, 0.26002944946289064, 0.2599649353027344, 0.260105224609375, 0.2600478820800781, 0.2600663146972656, 
0.25986868286132814, 0.25998541259765623, 0.2599813232421875, 0.2601082763671875, 0.2602352600097656, 0.2601041870117187, 0.2601082763671875, 0.2601973876953125, 0.26011749267578127, 0.2600663146972656, 0.2600140686035156, 0.2602147827148438, 0.2602076110839844, 0.2603673706054688, 0.26010009765625, 0.2600714111328125, 0.2600816650390625, 0.26017587280273435, 0.26004583740234377, 0.26028851318359375, 0.26024652099609374, 0.26011239624023436, 0.26015435791015623, 0.2601615295410156, 0.2600407104492187, 0.2600172119140625, 0.26012051391601565, 0.2601451416015625, 0.2603325500488281, 0.2601820068359375, 0.2600765380859375, 0.2600335388183594, 0.26003762817382814, 0.26007962036132815, 0.2600447998046875, 0.2601666564941406, 0.260105224609375, 0.26013388061523435, 0.26012261962890626, 0.26009906005859373, 0.2600611877441406, 0.260178955078125, 0.260537353515625, 0.5406893920898438, 0.25999563598632813, 0.26002944946289064, 0.26001202392578127, 0.2599638977050781, 0.25995263671875, 0.2599413757324219, 0.2603120727539063, 0.2600202331542969, 0.25998541259765623, 0.2599034729003906, 0.2599710693359375, 0.25996902465820315, 0.26002227783203125, 0.26024038696289065, 0.26017587280273435, 0.2601492614746094, 0.2600837097167969, 0.2600110168457031, 0.2600202331542969, 0.26000384521484377, 0.26016357421875, 0.2602168273925781, 0.2602137451171875, 0.26002740478515624, 0.2602188720703125, 0.2599925842285156, 0.2601041870117187, 0.26011651611328124, 0.26032635498046874, 0.2602158203125, 0.2603356018066406, 0.26014617919921873, 0.26028033447265625, 0.2600919189453125, 0.26025164794921873, 0.26034381103515625, 0.26021786499023436, 0.2602567749023437, 0.26020965576171873, 0.2601257019042969, 0.26006427001953125, 0.2600478820800781, 0.26008062744140625, 0.2602711181640625, 0.2601513061523438, 0.2601615295410156, 0.26016259765625, 0.260000732421875, 0.2599966735839844, 0.26008474731445314, 0.2601062316894531, 0.2602567749023437, 0.26017074584960936, 0.26014208984375, 0.2601134033203125, 0.26016461181640627, 0.2600837097167969, 0.2601308288574219, 0.2603345947265625, 0.26025369262695314, 0.260274169921875, 0.2600980529785156, 0.5405234985351562, 0.26009292602539064, 0.2600345458984375, 0.2600663146972656, 0.26004376220703124, 0.2600058898925781, 0.26000997924804686, 0.26011444091796876, 0.260068359375, 0.26003250122070315, 0.25992703247070315, 0.26030694580078123, 0.2600396728515625, 0.2602669982910156, 0.2600560607910156, 0.26017691040039065, 0.260063232421875, 0.2601308288574219, 0.260136962890625, 0.2602291259765625, 0.26010931396484377, 0.26014822387695313, 0.2599966735839844, 0.260052978515625, 0.26002740478515624, 0.2600663146972656, 0.2601533508300781, 0.260232177734375, 0.26017074584960936, 0.26018817138671874, 0.2603263854980469, 0.26019021606445314, 0.26006527709960936, 0.26017587280273435, 0.26023934936523435, 0.2603714599609375, 0.2601922607421875, 0.2601615295410156, 0.26031716918945313, 0.26016461181640627, 0.26007244873046875, 0.2601922607421875, 0.26012875366210936, 0.2601185302734375, 0.26021786499023436, 0.26016973876953126, 0.26021273803710937, 0.260136962890625, 0.2601031799316406, 0.260379638671875, 0.2602291259765625, 0.26033050537109376, 0.26011544799804687, 0.2603222961425781, 0.26023934936523435, 0.26016973876953126, 0.2601441345214844, 0.26020660400390627, 0.26016461181640627, 0.2604021911621094, 0.2601748352050781, 0.26020965576171873, 0.26023834228515624, 0.5403658447265625, 0.26022808837890626, 0.26013491821289064, 0.26002227783203125, 0.2600079345703125, 0.2600202331542969, 
0.25998028564453124, 0.26002328491210935, 0.26002532958984376, 0.260173828125, 0.26014208984375, 0.2601799621582031, 0.2599915466308594, 0.26002944946289064, 0.25997720336914065, 0.26005914306640626, 0.2599342041015625, 0.25996185302734376, 0.26002944946289064, 0.2600068969726563, 0.25997515869140625, 0.26023321533203125, 0.25999563598632813, 0.26012161254882815, 0.26036224365234373, 0.26008575439453124, 0.2602362976074219, 0.2601871337890625, 0.26005810546875, 0.2601082763671875, 0.2600693664550781, 0.2600611877441406, 0.2601021423339844, 0.2602342529296875, 0.2601605224609375, 0.2600130615234375, 0.25995367431640626, 0.2600386657714844, 0.2599915466308594, 0.2601390075683594, 0.2602229614257813, 0.260105224609375, 0.2600284118652344, 0.2600478820800781, 0.25997927856445313, 0.2600284118652344, 0.260094970703125, 0.260094970703125, 0.26026190185546877, 0.26019021606445314, 0.2601257019042969, 0.26014004516601563, 0.26012161254882815, 0.2601113586425781, 0.26005197143554687, 0.2601246643066406, 0.2601041870117187, 0.26011239624023436, 0.2600488891601562, 0.2601082763671875, 0.26012979125976565, 0.2600478820800781, 0.2603417663574219, 0.5405787963867188, 0.2601574401855469, 0.2599966735839844, 0.2600396728515625, 0.2600345458984375, 0.26000180053710936, 0.260031494140625, 0.26007244873046875, 0.2600499267578125, 0.26014935302734377, 0.26003753662109375, 0.26024551391601564, 0.26003762817382814, 0.2600888366699219, 0.26008984375, 0.26002740478515624, 0.26008575439453124, 0.2600960083007812, 0.25997515869140625, 0.26009906005859373, 0.2599413757324219, 0.25995672607421877, 0.2602147827148438, 0.26008270263671873, 0.2601390075683594, 0.260126708984375, 0.25999462890625, 0.260052978515625, 0.26000384521484377, 0.26012261962890626, 0.26016256713867186, 0.26027621459960937, 0.2600560607910156, 0.26024755859375, 0.2600263671875, 0.26005093383789063, 0.26024139404296875, 0.2604267578125, 0.26019021606445314, 0.2602567749023437, 0.26005093383789063, 0.2601103210449219, 0.2600130615234375, 0.2600140686035156, 0.26001715087890626, 0.2600120849609375, 0.2602270202636719, 0.26012979125976565, 0.26007040405273435, 0.26004376220703124, 0.26005810546875, 0.260284423828125, 0.26034585571289065, 0.2601922607421875, 0.26010726928710937, 0.26014617919921873, 0.26003662109375, 0.26012057495117186, 0.2601257019042969, 0.2601308288574219, 0.2602854309082031, 0.2605189208984375, 0.2602649536132812]",tokens/s,3.7856793941897777,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,t,t,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise 
ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/t/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2ff7-1d1113446502c470206c170a;038fac3c-19f9-4bb1-bf98-351fc0712bae) Repository Not Found for url: https://huggingface.co/t/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: t is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neox-20b,EleutherAI/gpt-neox-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.0,,,,,,MB,2108.223488,15043.395584,0.0,14396.94848,13898.251776,s,10,16.774060791015625,1.6774060791015626,0.0018297475804955295,1.6770918579101561,1.6789085327148436,1.6804502990722656,1.6816837121582031,"[1.6819920654296876, 1.67540869140625, 1.6774444580078125, 1.676206298828125, 1.6767392578125, 1.6753848876953126, 1.6765943603515625, 1.6776787109375, 1.67856591796875, 1.678046142578125]",tokens/s,152.61659248136053,kWh,1.9787035716904537e-05,1.0841899168262898e-05,9.527879844519888e-05,0.0001259077333303663,tokens/kWh,2033234.919163287,MB,2108.223488,15043.395584,0.0,14396.94848,14315.959808,s,10,982.2773984375,98.22773984375,0.006322715714689567,98.22801562500001,98.23432265625,98.236829296875,98.238834609375,"[98.233765625, 98.224484375, 98.2393359375, 98.2251640625, 98.22903125, 98.2138984375, 98.2302578125, 98.2295859375, 98.224875, 
98.227]",tokens/s,0.6413666862356148,kWh,0.001159520428114467,0.0006355184711477705,0.005722492522434801,0.0075175314216970384,tokens/kWh,8380.410598374077,,s,629,995.810533813476,1.5831646006573552,0.19919555203142542,1.559141357421875,1.55962734375,1.5598268310546874,3.23526474609375,"[1.5585750732421875, 1.55860986328125, 1.5594239501953124, 1.5595242919921875, 1.558529052734375, 1.5586417236328125, 1.559109619140625, 1.5591219482421874, 1.5598243408203125, 1.5595867919921875, 1.55911572265625, 1.5588905029296876, 1.5587757568359375, 1.5587236328125, 1.559189453125, 1.558750244140625, 1.5590482177734375, 1.5591536865234374, 1.5591956787109376, 1.55924169921875, 1.558993896484375, 1.559488525390625, 1.559013427734375, 1.5594393310546875, 1.5592908935546874, 1.5589068603515626, 1.55905126953125, 1.559194580078125, 1.5587266845703125, 1.5591065673828124, 1.5589693603515624, 1.558802490234375, 1.558849609375, 1.559245849609375, 1.5592437744140626, 1.559234619140625, 1.55915771484375, 1.5592222900390624, 1.5598233642578125, 1.55966162109375, 1.559911376953125, 1.5597353515625, 1.559793701171875, 1.5596707763671875, 1.55985302734375, 1.55943115234375, 1.5597127685546874, 1.5593175048828125, 1.55945263671875, 1.559732177734375, 1.559920654296875, 1.5596063232421875, 1.5595447998046874, 1.5596646728515624, 1.559162841796875, 1.5595458984375, 1.55953662109375, 1.5590595703125, 1.559057373046875, 1.5596236572265625, 1.5592509765625, 1.559478271484375, 3.234529296875, 1.55871337890625, 1.5585946044921875, 1.5587962646484375, 1.5588270263671875, 1.558576171875, 1.558561767578125, 1.55837744140625, 1.5585587158203125, 1.5589652099609375, 1.5587061767578125, 1.55881787109375, 1.558748046875, 1.5588065185546875, 1.5585740966796875, 1.5593819580078125, 1.5591004638671875, 1.558873046875, 1.5587451171875, 1.5594066162109375, 1.559108642578125, 1.55873583984375, 1.5592396240234374, 1.559013427734375, 1.558961181640625, 1.5588013916015624, 1.5588310546875, 1.5591044921875, 1.5594649658203126, 1.558951904296875, 1.5591546630859374, 1.55907177734375, 1.5593482666015626, 1.5590062255859376, 1.559110595703125, 1.5595079345703124, 1.55951513671875, 1.5589560546875, 1.5587255859375, 1.5589560546875, 1.559404541015625, 1.5599503173828124, 1.5588074951171875, 1.5591341552734375, 1.5587113037109375, 1.55890380859375, 1.561585693359375, 1.5592509765625, 1.5593123779296876, 1.5594271240234374, 1.55922021484375, 1.5592386474609374, 1.559277587890625, 1.559350341796875, 1.5596614990234374, 1.5596533203125, 1.559194580078125, 1.5592427978515624, 1.5593604736328126, 1.5594066162109375, 1.559331787109375, 1.5592806396484375, 1.559973876953125, 3.23797607421875, 1.5602718505859374, 1.559963623046875, 1.5592427978515624, 1.5589161376953125, 1.5587706298828126, 1.5589017333984374, 1.559024658203125, 1.559089111328125, 1.5593585205078124, 1.5589334716796874, 1.5592069091796874, 1.5589990234375, 1.5592745361328124, 1.5593963623046876, 1.559119873046875, 1.5596800537109374, 1.5589478759765625, 1.5588074951171875, 1.5590042724609374, 1.558916015625, 1.55900927734375, 1.5591424560546876, 1.559677978515625, 1.5591373291015624, 1.559119873046875, 1.5591474609375, 1.5588157958984374, 1.5592601318359376, 1.5591690673828125, 1.559141357421875, 1.5591485595703125, 1.55908203125, 1.559183349609375, 1.55915576171875, 1.559400390625, 1.5592960205078126, 1.5591761474609376, 1.5593154296875, 1.5590482177734375, 1.558973388671875, 1.559626708984375, 1.559330810546875, 1.5593973388671876, 1.5593338623046875, 1.559593994140625, 
1.5598807373046875, 1.5612364501953124, 1.55947412109375, 1.5594168701171875, 1.5596483154296874, 1.5595643310546874, 1.559582763671875, 1.55965234375, 1.5598623046875, 1.559635986328125, 1.5595242919921875, 1.5597813720703124, 1.5598643798828125, 1.5591075439453126, 1.5595611572265624, 1.55919873046875, 1.5593585205078124, 3.23542333984375, 1.55846044921875, 1.558307861328125, 1.5589805908203125, 1.558640625, 1.5585218505859375, 1.55879833984375, 1.558877197265625, 1.5586907958984375, 1.5587757568359375, 1.5589539794921874, 1.559329833984375, 1.559342041015625, 1.5588546142578126, 1.5590174560546874, 1.559341064453125, 1.558877197265625, 1.5588382568359376, 1.55911474609375, 1.5592764892578126, 1.5589847412109374, 1.5587337646484376, 1.55911376953125, 1.5587901611328125, 1.5588280029296875, 1.5588690185546874, 1.5593216552734375, 1.559299072265625, 1.559120849609375, 1.558898681640625, 1.5588414306640626, 1.55898876953125, 1.5588966064453125, 1.5587901611328125, 1.558739990234375, 1.5589232177734376, 1.5593912353515624, 1.55947314453125, 1.559403564453125, 1.559525390625, 1.559373779296875, 1.5593902587890625, 1.5591658935546875, 1.559456787109375, 1.5621683349609374, 1.558978515625, 1.5591126708984375, 1.55909326171875, 1.5591383056640624, 1.5590767822265625, 1.55901025390625, 1.5589765625, 1.559456787109375, 1.5590185546875, 1.55949462890625, 1.55913623046875, 1.5593287353515626, 1.559487548828125, 1.559192626953125, 1.55945263671875, 1.5594342041015625, 1.5596298828125, 1.5594813232421876, 3.236599853515625, 1.5593328857421875, 1.5589642333984375, 1.55854541015625, 1.558728759765625, 1.558466552734375, 1.55877783203125, 1.5593133544921876, 1.5591546630859374, 1.5588116455078125, 1.5587174072265626, 1.5586641845703124, 1.558667236328125, 1.558561767578125, 1.5583734130859375, 1.558703125, 1.5588648681640624, 1.55964111328125, 1.5586856689453126, 1.558662109375, 1.5585587158203125, 1.558540283203125, 1.5592540283203125, 1.558740966796875, 1.559257080078125, 1.5590430908203126, 1.5589775390625, 1.558866943359375, 1.5591044921875, 1.5596021728515626, 1.559546875, 1.55960009765625, 1.55951513671875, 1.5589847412109374, 1.5592540283203125, 1.559413818359375, 1.5594434814453124, 1.5591956787109376, 1.559342041015625, 1.5589847412109374, 1.5589805908203125, 1.5593165283203125, 1.559119873046875, 1.5594117431640624, 1.5589334716796874, 1.5593184814453125, 1.559287841796875, 1.5593902587890625, 1.5601192626953124, 1.5590482177734375, 1.559271484375, 1.559646240234375, 1.5597353515625, 1.5595601806640624, 1.5594761962890624, 1.559477294921875, 1.5593011474609375, 1.5621007080078124, 1.559258056640625, 1.5594239501953124, 1.559357421875, 1.5594639892578126, 1.559361572265625, 3.23485693359375, 1.5584061279296875, 1.5584429931640624, 1.558455322265625, 1.558666259765625, 1.5583436279296874, 1.5583365478515625, 1.55860888671875, 1.55905224609375, 1.558590576171875, 1.558578125, 1.5587183837890626, 1.55867236328125, 1.5587255859375, 1.5585269775390624, 1.5587215576171876, 1.558656982421875, 1.5586744384765625, 1.5588331298828124, 1.559310302734375, 1.558556640625, 1.5583118896484376, 1.5588935546875, 1.5596380615234375, 1.559288818359375, 1.558983642578125, 1.559357421875, 1.559225341796875, 1.558750244140625, 1.558867919921875, 1.5587225341796875, 1.5589283447265625, 1.5586417236328125, 1.5587911376953125, 1.5586907958984375, 1.559267333984375, 1.5594957275390624, 1.558765625, 1.5612979736328125, 1.5589273681640625, 1.5587225341796875, 1.558982666015625, 1.5588209228515626, 1.5590604248046875, 
1.558908935546875, 1.5590697021484374, 1.5586539306640625, 1.5587706298828126, 1.5595069580078125, 1.559635986328125, 1.5590185546875, 1.55902978515625, 1.5588546142578126, 1.5590277099609375, 1.5595592041015625, 1.5593226318359374, 1.5589283447265625, 1.5589744873046876, 1.5589171142578124, 1.5588587646484375, 1.5594556884765625, 1.5590963134765625, 1.5595478515625, 3.235958740234375, 1.5587061767578125, 1.5582955322265626, 1.558918212890625, 1.559405517578125, 1.5590072021484376, 1.559066650390625, 1.559258056640625, 1.558982666015625, 1.5591373291015624, 1.5590123291015625, 1.559109619140625, 1.5587010498046876, 1.5590697021484374, 1.558677490234375, 1.558846435546875, 1.559234619140625, 1.55937890625, 1.558992919921875, 1.5595223388671875, 1.559162841796875, 1.5587706298828126, 1.559435302734375, 1.560753173828125, 1.55900830078125, 1.5587052001953126, 1.5587420654296875, 1.55875634765625, 1.5585545654296875, 1.55881884765625, 1.5590809326171875, 1.559193603515625, 1.559277587890625, 1.55932568359375, 1.55913623046875, 1.5592764892578126, 1.559235595703125, 1.5595654296875, 1.559406494140625, 1.5591424560546876, 1.558719482421875, 1.55981103515625, 1.5590491943359375, 1.5598408203125, 1.55958984375, 1.55905029296875, 1.5590296630859375, 1.5592437744140626, 1.559709716796875, 1.559160888671875, 1.5592642822265625, 1.55951513671875, 1.559525390625, 1.55964111328125, 1.559710693359375, 1.5596390380859375, 1.5594691162109375, 1.55936767578125, 1.5593133544921876, 1.5591116943359375, 1.5594761962890624, 1.5594700927734375, 1.5595284423828124, 3.23728076171875, 1.558865966796875, 1.5586630859375, 1.5593482666015626, 1.5598438720703125, 1.559294921875, 1.559099365234375, 1.5591485595703125, 1.558935546875, 1.55944970703125, 1.558636474609375, 1.5588331298828124, 1.5585986328125, 1.558814697265625, 1.55905224609375, 1.5590338134765624, 1.56031591796875, 1.5590655517578125, 1.5592652587890625, 1.5588966064453125, 1.5590400390625, 1.5587420654296875, 1.5598909912109375, 1.559056396484375, 1.5592652587890625, 1.5594056396484375, 1.5589385986328126, 1.5587860107421876, 1.55886083984375, 1.5590543212890624, 1.559300048828125, 1.5590921630859376, 1.559215087890625, 1.5589744873046876, 1.5589600830078125, 1.5591177978515625, 1.5594957275390624, 1.5592960205078126, 1.558877197265625, 1.5592017822265625, 1.5588270263671875, 1.5589222412109376, 1.5591588134765626, 1.559298095703125, 1.5588055419921876, 1.5590748291015626, 1.559099365234375, 1.5595028076171875, 1.5594915771484374, 1.5594495849609376, 1.559300048828125, 1.55938916015625, 1.5596697998046876, 1.5595755615234375, 1.5595201416015625, 1.5593564453125, 1.5596134033203124, 1.55945263671875, 1.5595294189453126, 1.5592764892578126, 1.559214111328125, 1.55919873046875, 1.5595919189453125, 3.238277099609375, 1.5593380126953125, 1.558845458984375, 1.559089111328125, 1.5596502685546876, 1.559151611328125, 1.55905126953125, 1.5589949951171875, 1.558698974609375, 1.5589119873046875, 1.559118896484375, 1.5591055908203124, 1.5588433837890625, 1.55887109375, 1.559098388671875, 1.559488525390625, 1.5590921630859376, 1.5589212646484376, 1.5586201171875, 1.5587542724609376, 1.5588372802734376, 1.5583734130859375, 1.559034912109375, 1.5590389404296876, 1.5593184814453125, 1.559047119140625, 1.559078857421875, 1.5590113525390625, 1.5589908447265626, 1.5593114013671876, 1.559214111328125, 1.559314453125, 1.5593348388671875, 1.5594014892578125, 1.5593011474609375, 1.55919873046875, 1.5591455078125, 1.559130126953125, 1.5589488525390625, 1.55922021484375, 
1.5590164794921875, 1.55928369140625, 1.5593359375, 1.55940869140625, 1.5589334716796874, 1.5590072021484376, 1.5590543212890624, 1.5592960205078126, 1.5591453857421875, 1.5592960205078126, 1.55916796875, 1.55928271484375, 1.559329833984375, 1.5590318603515625, 1.5594649658203126, 1.5592314453125, 1.55926123046875, 1.5591290283203125, 1.5590205078125, 1.5590174560546874, 1.559034912109375, 1.5591658935546875, 1.559373779296875, 3.237329833984375, 1.558613037109375, 1.5583804931640626, 1.5587542724609376, 1.5585423583984375, 1.559004150390625, 1.5588382568359376, 1.55866015625, 1.5590809326171875, 1.558877197265625, 1.558772705078125, 1.558992919921875, 1.558794189453125, 1.559083984375, 1.55869287109375, 1.5590604248046875, 1.55894580078125, 1.5587255859375, 1.5587962646484375, 1.558877197265625, 1.5590615234375, 1.558513671875, 1.5592530517578125, 1.5587318115234374, 1.5593697509765625, 1.559119873046875, 1.5611954345703125, 1.559103515625, 1.5589058837890626, 1.5593779296875, 1.559484375, 1.5593184814453125, 1.5594208984375, 1.5591065673828124, 1.559189453125, 1.5593656005859375, 1.5593656005859375, 1.55962060546875, 1.5591669921875, 1.5591322021484375, 1.559208984375, 1.5593665771484375, 1.559109619140625, 1.5592222900390624, 1.5591434326171876, 1.559194580078125, 1.5592008056640625, 1.55911376953125, 1.55950390625, 1.5590687255859375, 1.559582763671875, 1.5593564453125, 1.5598284912109375, 1.5596851806640626, 1.55951513671875, 1.55947216796875, 1.5594691162109375, 1.5591895751953124, 1.5594127197265626, 1.55911572265625, 1.559299072265625, 1.5592508544921875, 1.5591474609375]",tokens/s,0.6316462606508407,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,/,/,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 370, in cached_file raise EnvironmentError( OSError: / does not appear to have a file named config.json. Checkout 'https://huggingface.co///tree/None' for available files. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-2b,google/recurrentgemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status 
raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc3bc-5ba8b97d3a0733531c2ab339;5500863e-37e4-4e73-84e7-6a3203316978) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/recurrentgemma-2b is not the path to a directory containing a file named config.json. 
Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-72B,Qwen/Qwen1.5-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,-,-,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 106, in _inner_fn validate_repo_id(arg_value) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 160, in validate_repo_id raise HFValidationError( huggingface_hub.errors.HFValidationError: Repo id must use alphanumeric chars or '-', '_', '.', '--' and '..' are forbidden, '-' and '.' cannot start or end the name, max length is 96: '-'. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 463, in cached_file raise EnvironmentError( OSError: Incorrect path_or_model_id: '-'. Please provide either the path to a local folder or the repo_id of a model on the Hub. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-6B-nl,Salesforce/codegen-6B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: CodeGenForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpex8vqv1r/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-7b,google/recurrentgemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667cc411-302dbb391b17bd0b1224c450;6e724492-4d4e-4bdd-b44a-8ee771eb5e37) Repository Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: google/recurrentgemma-7b is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-13b,huggyllama/llama-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,3013.357568,9259.450368,0.0,8613.003264,8211.364864,s,10,10.943852783203125,1.0943852783203125,0.00185809213941572,1.0943612060546875,1.0968373901367188,1.0970477233886717,1.0972159899902343,"[1.097258056640625, 1.09552197265625, 
1.0923255615234375, 1.0922135009765626, 1.09405712890625, 1.091574951171875, 1.0937421875, 1.094665283203125, 1.0957034912109376, 1.0967906494140625]",tokens/s,233.92127532354476,kWh,1.2894171526034674e-05,7.065532383967365e-06,5.862768579100175e-05,7.858738970100378e-05,tokens/kWh,3257520.080180627,MB,3013.357568,9330.753536,0.0,8684.306432,8503.627264,s,10,640.7681367187499,64.07681367187499,0.020824056597130436,64.070240234375,64.11088984375,64.113944921875,64.116388984375,"[64.1102109375, 64.0801015625, 64.117, 64.0658984375, 64.065828125, 64.06840234375, 64.058859375, 64.04678125, 64.072078125, 64.0829765625]",tokens/s,0.9831949560196744,kWh,0.000756398006098138,0.0004145729534449856,0.003549850923211795,0.004720821882754919,tokens/kWh,13345.133869620864,,s,629,649.5949702148431,1.032742400977494,0.1300197569102036,1.016932373046875,1.018054443359375,1.0185316284179688,2.1104683203125,"[1.0186362915039062, 1.0175692749023437, 1.0181939086914062, 1.0167439575195312, 1.0171248779296875, 1.016279052734375, 1.0170603637695312, 1.0164469604492188, 1.019283447265625, 1.0177188110351563, 1.0175784912109376, 1.0175529174804687, 1.0177136840820313, 1.0184437866210938, 1.0184273681640625, 1.0175098876953126, 1.0180792236328124, 1.0184560546875, 1.0168545532226563, 1.0170511474609376, 1.0169282836914062, 1.0172200927734374, 1.0169989013671874, 1.016431640625, 1.01659033203125, 1.0177269897460937, 1.0169927978515625, 1.0175907592773437, 1.0173204345703124, 1.0170286254882812, 1.0172518310546874, 1.0171300048828125, 1.0176942749023437, 1.0188645629882813, 1.0181181640625, 1.0181754760742188, 1.0174935302734376, 1.0177156982421875, 1.0180280151367187, 1.0169384765625, 1.0188124389648439, 1.0189732055664062, 1.01850830078125, 1.0174976196289063, 1.0177904663085937, 1.0171678466796874, 1.0175887451171874, 1.0177188110351563, 1.017942138671875, 1.019806640625, 1.0175037231445312, 1.0165176391601562, 1.0163671264648437, 1.018239990234375, 1.0174945068359376, 1.0187335815429688, 1.0170091552734375, 1.017196533203125, 1.0169682006835938, 1.0169784545898437, 1.0177904663085937, 1.0165442504882813, 2.11431005859375, 1.017186279296875, 1.0167265014648437, 1.0168094482421874, 1.016437744140625, 1.0170368041992188, 1.0164561767578124, 1.0164777221679688, 1.01673779296875, 1.0177433471679687, 1.0164715576171874, 1.0170480346679687, 1.0178303833007813, 1.0165759887695311, 1.0164664306640625, 1.0171217651367188, 1.0163681030273437, 1.0164623413085938, 1.0167490844726563, 1.016791015625, 1.0163056640625, 1.0171576538085938, 1.0169917602539063, 1.017670654296875, 1.0177689819335938, 1.0164049682617187, 1.0167510986328125, 1.0171494140625, 1.0164398193359374, 1.0172323608398437, 1.0166558837890625, 1.0169886474609375, 1.01707470703125, 1.0176522216796875, 1.0164090576171876, 1.0175907592773437, 1.016195068359375, 1.0165288696289063, 1.0176030883789062, 1.0171300048828125, 1.0172190551757812, 1.01783447265625, 1.0167275390625, 1.0168729858398438, 1.016838134765625, 1.01656884765625, 1.0176798706054688, 1.0164961547851563, 1.0169405517578125, 1.0169313354492187, 1.0181314697265624, 1.0185051879882812, 1.0174259033203126, 1.0190386962890625, 1.0177259521484374, 1.0175364990234375, 1.01764404296875, 1.0178928833007812, 1.0174586791992188, 1.01879296875, 1.0175662231445313, 1.0174638061523438, 1.0168340454101563, 2.110869384765625, 1.0163967895507813, 1.0164827880859375, 1.0182993774414062, 1.017069580078125, 1.0174013671875, 1.017512939453125, 1.0179420166015625, 1.0180894775390625, 1.0180361938476563, 
1.0178211669921875, 1.0178641967773439, 1.0179297485351562, 1.019236328125, 1.0181181640625, 1.0165933837890626, 1.0170029907226563, 1.0190684814453126, 1.0187539672851562, 1.0192598876953125, 1.0180260009765625, 1.018945556640625, 1.0177413330078124, 1.0181325073242187, 1.0181212158203126, 1.018977294921875, 1.0179379272460938, 1.0190817260742187, 1.0184038696289062, 1.0187530517578125, 1.0190274658203125, 1.0183782348632813, 1.0174586791992188, 1.0177177734375, 1.0169108276367187, 1.0171514892578124, 1.0180515747070313, 1.0181908569335938, 1.0179368896484375, 1.0182778930664063, 1.0189833984375, 1.0178088989257812, 1.0176030883789062, 1.0174474487304688, 1.01793994140625, 1.0195138549804688, 1.0180485229492187, 1.0181898193359376, 1.0169937744140625, 1.0169609985351562, 1.016900634765625, 1.0166210327148437, 1.0164254760742188, 1.0164019165039062, 1.0165575561523437, 1.0168330078125, 1.016764404296875, 1.016880126953125, 1.0165565185546874, 1.0169978637695312, 1.016406005859375, 1.0169456787109374, 1.0166640625, 2.10943701171875, 1.0166231079101562, 1.0164449462890626, 1.0163446044921876, 1.0166394653320312, 1.0180321044921874, 1.0164285278320313, 1.016468505859375, 1.0164756469726564, 1.0164132080078125, 1.0169036865234375, 1.0166927490234374, 1.0170050659179688, 1.0168923950195312, 1.0167388305664062, 1.0166712036132812, 1.016595458984375, 1.0170153198242187, 1.016637451171875, 1.0168944702148437, 1.0178826293945313, 1.0176430053710936, 1.017217041015625, 1.0173972778320313, 1.0166896362304687, 1.0168361206054688, 1.0170224609375, 1.0166179809570313, 1.0168811645507811, 1.0171064453125, 1.0169569091796875, 1.017037841796875, 1.0164387817382812, 1.0170715942382813, 1.0171002807617187, 1.0174781494140626, 1.01707470703125, 1.0163302612304688, 1.0170009765625, 1.0169763793945312, 1.0165155639648438, 1.0176727294921875, 1.0179563598632813, 1.0177484741210938, 1.0178109741210937, 1.016742919921875, 1.0165545043945312, 1.016642578125, 1.0165452880859376, 1.0169088134765625, 1.0167982177734376, 1.01705419921875, 1.0169876708984376, 1.0166353759765625, 1.01646337890625, 1.016869873046875, 1.0170398559570313, 1.0165657958984375, 1.016586181640625, 1.0168739624023437, 1.0175160522460938, 1.0169579467773437, 1.0165821533203125, 2.1117490234375, 1.016916015625, 1.017007080078125, 1.0167859497070313, 1.0164111328125, 1.016964111328125, 1.0174464111328125, 1.016838134765625, 1.016722412109375, 1.01697021484375, 1.0166876220703125, 1.0165493774414063, 1.0166353759765625, 1.0168402099609375, 1.0164602661132813, 1.016764404296875, 1.0166251220703124, 1.0166599731445312, 1.0170848999023439, 1.0168576049804687, 1.01642138671875, 1.0169517822265626, 1.01652685546875, 1.016585205078125, 1.0163035888671874, 1.0168361206054688, 1.0171637573242187, 1.01675927734375, 1.0166220703125, 1.0165278930664063, 1.0162329711914062, 1.0164930419921876, 1.0165626831054688, 1.0164500732421875, 1.0164182739257812, 1.016795166015625, 1.0171268920898437, 1.0172815551757812, 1.0167838745117188, 1.0167255249023437, 1.0174822387695313, 1.0175631103515625, 1.0172815551757812, 1.0165330200195313, 1.0170941162109375, 1.0168494262695313, 1.0168780517578124, 1.0167992553710938, 1.0165073852539062, 1.01684326171875, 1.0168442993164062, 1.0172507934570312, 1.0167459716796876, 1.0169251708984375, 1.017218017578125, 1.0175836181640625, 1.0206064453125, 1.0173880615234374, 1.017280517578125, 1.0169978637695312, 1.0173460693359375, 1.0168074340820312, 1.0165514526367188, 2.109128662109375, 1.0168524780273438, 1.01806591796875, 
1.0175098876953126, 1.0182809448242187, 1.0176317138671875, 1.016975341796875, 1.0173204345703124, 1.0172610473632813, 1.0172006225585937, 1.017169921875, 1.0174801635742188, 1.0167971801757814, 1.018260498046875, 1.01722314453125, 1.0176788330078126, 1.0169375, 1.0171392211914063, 1.01722314453125, 1.01747509765625, 1.017091064453125, 1.0181314697265624, 1.0181068725585938, 1.0168790893554687, 1.0172938232421875, 1.016933349609375, 1.0170153198242187, 1.0169262084960937, 1.0168422241210937, 1.0174187622070312, 1.016806396484375, 1.0169446411132812, 1.0168545532226563, 1.0171279296875, 1.0165452880859376, 1.0166732788085937, 1.0163753051757813, 1.0164019165039062, 1.0169395141601563, 1.016479736328125, 1.0163753051757813, 1.0167285766601561, 1.016975341796875, 1.0166343383789063, 1.016521728515625, 1.0164971313476563, 1.01644287109375, 1.0162514038085937, 1.0162565307617188, 1.0164049682617187, 1.0166456298828126, 1.0162913208007813, 1.0164541625976562, 1.0166067504882812, 1.0164879150390624, 1.0165186767578125, 1.0164029541015625, 1.0165452880859376, 1.0164561767578124, 1.016764404296875, 1.0167705688476563, 1.0167193603515625, 1.016838134765625, 2.111257568359375, 1.0166507568359375, 1.0164859008789062, 1.0166497192382813, 1.0162554931640626, 1.0165023803710938, 1.0166967163085938, 1.016848388671875, 1.0164193115234375, 1.0167725830078125, 1.01661181640625, 1.0165176391601562, 1.017017333984375, 1.0167285766601561, 1.0165155639648438, 1.0169896850585938, 1.0168555297851563, 1.016900634765625, 1.0165892944335937, 1.0169630737304687, 1.0168340454101563, 1.0165022583007812, 1.01639990234375, 1.0166077270507812, 1.0167879638671875, 1.0163414916992188, 1.016711181640625, 1.016896484375, 1.0175538940429687, 1.0173655395507812, 1.01714013671875, 1.0171791381835937, 1.016394775390625, 1.0165196533203125, 1.0164090576171876, 1.0165565185546874, 1.0166087646484374, 1.0164295654296875, 1.0164623413085938, 1.016205322265625, 1.0160199584960938, 1.0161571655273438, 1.0178303833007813, 1.0175641479492188, 1.0178211669921875, 1.0177904663085937, 1.0168576049804687, 1.0169139404296874, 1.016748046875, 1.0167572631835937, 1.0166681518554688, 1.0166241455078124, 1.0170941162109375, 1.0179061889648438, 1.0177638549804688, 1.0172477416992187, 1.01701220703125, 1.017195556640625, 1.0166302490234376, 1.0166353759765625, 1.0164111328125, 1.0163353881835937, 1.0165473022460938, 2.111709228515625, 1.016605712890625, 1.0165575561523437, 1.0163179321289062, 1.0160752563476563, 1.0163681030273437, 1.0162769775390625, 1.01686376953125, 1.0165380859375, 1.01652685546875, 1.0169149169921874, 1.0168729858398438, 1.016648681640625, 1.0167265014648437, 1.0163302612304688, 1.0163599243164063, 1.0167684936523438, 1.0168627319335937, 1.0168145751953126, 1.0170839233398437, 1.0169886474609375, 1.016784912109375, 1.0166917114257812, 1.016859619140625, 1.0169098510742187, 1.0174157104492187, 1.0167152709960938, 1.0165821533203125, 1.0165196533203125, 1.0169415893554687, 1.01663232421875, 1.016784912109375, 1.0165791015625, 1.0167817993164063, 1.0162913208007813, 1.016732666015625, 1.0160650024414062, 1.0161500244140624, 1.0161346435546874, 1.0164756469726564, 1.0165278930664063, 1.0166435546875, 1.0165442504882813, 1.0163240966796876, 1.0163547973632812, 1.0165545043945312, 1.016332275390625, 1.0163927001953126, 1.0162432250976563, 1.0166128540039063, 1.01661181640625, 1.0168319702148438, 1.0166835327148438, 1.0168361206054688, 1.0165135498046876, 1.0166527709960937, 1.0165084228515624, 1.0164992065429688, 1.016553466796875, 
1.0167490844726563, 1.0172364501953124, 1.0167316284179688, 1.0162698364257812, 2.113395751953125, 1.017417724609375, 1.0168319702148438, 1.016943603515625, 1.0170357666015626, 1.01726513671875, 1.016616943359375, 1.0171105346679687, 1.0168176879882813, 1.0170306396484374, 1.0163834838867187, 1.0166005859375, 1.0165791015625, 1.0170603637695312, 1.0170880126953126, 1.0176962280273438, 1.0171422729492188, 1.0168514404296876, 1.0166578979492187, 1.0166784057617186, 1.01640087890625, 1.0165043334960937, 1.0165616455078126, 1.0163988647460938, 1.016511474609375, 1.0165104370117188, 1.0166610107421874, 1.01673779296875, 1.016742919921875, 1.0167920532226562, 1.0162626342773438, 1.0163783569335938, 1.0163292236328125, 1.0166128540039063, 1.0167019653320313, 1.0171371459960938, 1.0175958862304688, 1.0170101928710937, 1.0169866333007813, 1.0172344360351562, 1.0172160034179687, 1.016932373046875, 1.0171678466796874, 1.0173429565429688, 1.0172846069335937, 1.0185523071289062, 1.0170470581054687, 1.0176522216796875, 1.017080810546875, 1.0174044189453124, 1.0174361572265624, 1.017650146484375, 1.0165667724609375, 1.0164510498046875, 1.0166917114257812, 1.0181375732421876, 1.0173368530273437, 1.0173501586914062, 1.0173982543945312, 1.0175170288085937, 1.0172129516601562, 1.0174832763671875, 1.017628662109375, 2.11508642578125, 1.01734912109375, 1.0171422729492188, 1.0165667724609375, 1.0170706176757813, 1.0171514892578124, 1.0170203857421876, 1.0170941162109375, 1.017049072265625, 1.016974365234375, 1.0170296020507812, 1.0174832763671875, 1.0167408447265625, 1.01798193359375, 1.0171473999023437, 1.016896484375, 1.0165084228515624, 1.0163824462890625, 1.016531982421875, 1.016742919921875, 1.0175529174804687, 1.0176614379882813, 1.0177628173828126, 1.0172733154296876, 1.017101318359375, 1.0169559326171875, 1.0170614013671875, 1.0167142333984376, 1.01680126953125, 1.0172303466796875, 1.0184417114257813, 1.0178109741210937, 1.0175836181640625, 1.0166937866210937, 1.016395751953125, 1.0165514526367188, 1.016395751953125, 1.0163200073242187, 1.0162093505859375, 1.0166200561523437, 1.0170992431640624, 1.0166538696289062, 1.0170900268554688, 1.0167030029296875, 1.0169343872070313, 1.0181693725585939, 1.018203125, 1.0185471801757813, 1.0176788330078126, 1.0179194946289063, 1.0182062377929688, 1.0176983032226563, 1.0169210815429688, 1.0170449829101562, 1.0173613891601563, 1.016896484375, 1.0173850708007812, 1.0171493530273437, 1.0174464111328125, 1.0179911499023437, 1.0175467529296875, 1.017291748046875, 1.0167869262695313]",tokens/s,0.9682956747524812,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-2b,google/gemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = 
launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc311-78808eb72c1a1a441e04c8e8;9b5db6e7-4e75-49c5-b6c4-97124ae43da7) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/gemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-7B,Qwen/Qwen-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-13b-hf,meta-llama/Llama-2-13b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,3011.678208,9259.450368,0.0,8613.003264,8211.364864,s,10,10.948309448242187,1.0948309448242186,0.0017810888898920402,1.0946644287109375,1.0975797607421875,1.097637109375,1.09768298828125,"[1.0947955322265626, 1.0975670166015625, 1.0945333251953124, 1.09283447265625, 1.093535400390625, 1.0924375, 1.0933472900390624, 1.0952611083984376, 1.0963033447265624, 1.0976944580078125]",tokens/s,233.8260543422092,kWh,1.2896189077032937e-05,7.064633461468475e-06,6.102240992899577e-05,8.098323246749718e-05,tokens/kWh,3161148.20562079,MB,3011.678208,9330.753536,0.0,8684.306432,8503.627264,s,10,640.83317578125,64.083317578125,0.01786692489370897,64.076908203125,64.113046875,64.1131875,64.1133,"[64.113015625, 64.113328125, 64.1000546875, 64.0766796875, 64.07603515625, 64.07292578125, 64.06015625, 64.0786328125, 64.0652109375, 64.07713671875]",tokens/s,0.983095170177413,kWh,0.0007565677230060101,0.0004146680905150424,0.003575465860370403,0.0047467016738914555,tokens/kWh,13272.374024793335,,s,629,649.6630466918949,1.0328506306707386,0.1300798719514248,1.0170562744140625,1.01796640625,1.0183720947265624,2.11047625,"[1.0181212158203126, 1.0173245239257813, 1.0185758666992188, 1.0180556640625, 1.018123291015625, 1.0182379760742188, 1.0183464965820312, 1.0172989501953125, 1.0187325439453125, 1.0184806518554688, 1.0173450317382813, 1.0169671630859376, 1.0177156982421875, 1.016869873046875, 1.0170828857421874, 1.0171054077148438, 1.017406494140625, 1.0178303833007813, 1.0179625244140624, 1.0176153564453125, 1.0172446899414063, 1.0175590209960939, 1.0186322021484375, 1.0177269897460937, 1.017723876953125, 1.0172518310546874, 1.0174484252929688, 1.0174822387695313, 1.0188011474609375, 1.0174320678710937, 1.0174351196289062, 1.0169671630859376, 1.0170900268554688, 1.0175518798828125, 1.0169671630859376, 1.0169815063476562, 1.01698046875, 1.017164794921875, 1.0184673461914062, 1.0172026977539061, 1.0175672607421875, 1.017354248046875, 1.0178375854492188, 1.01718017578125, 1.0172631225585937, 1.0175252685546874, 1.0175877075195312, 1.0180198364257813, 1.01806591796875, 1.0169200439453125, 1.0169722900390625, 1.0176399536132812, 1.0175457153320313, 1.0184530029296874, 1.0189946899414062, 1.017486328125, 1.0173870239257812, 1.0177218627929687, 1.0176204833984375, 1.0178468017578124, 1.0180025024414063, 1.0178231811523437, 2.115484619140625, 1.0167900390625, 1.0173245239257813, 1.0184478759765625, 1.0185051879882812, 1.018660888671875, 1.0180894775390625, 1.01798193359375, 1.0169508056640626, 1.0166098022460937, 1.0167337036132813, 1.0166784057617186, 1.0166610107421874, 1.0168370971679688, 1.0182635498046875, 1.017359375, 1.017260009765625, 1.0175375366210937, 1.0179983520507812, 1.0172620849609375, 
1.0179829711914063, 1.0172477416992187, 1.0176378784179687, 1.0178222045898437, 1.018113037109375, 1.0180044555664063, 1.0183690185546874, 1.0185379638671874, 1.017069580078125, 1.0172241821289063, 1.0169231567382813, 1.0170255126953125, 1.016953857421875, 1.0170449829101562, 1.0174218139648437, 1.0171945190429688, 1.0167879638671875, 1.0176768188476562, 1.0178262939453124, 1.017734130859375, 1.01724365234375, 1.0175989990234375, 1.0170029907226563, 1.0170224609375, 1.0177013549804688, 1.0169600219726562, 1.0178672485351563, 1.0179348754882813, 1.01819287109375, 1.0178980102539064, 1.0185748291015626, 1.0183229370117188, 1.0191605834960937, 1.0187940063476562, 1.0190294799804687, 1.018576904296875, 1.0186598510742189, 1.0183741455078126, 1.0177904663085937, 1.0180341796875, 1.0173480834960937, 1.0171412353515625, 1.017723876953125, 2.11309765625, 1.0174781494140626, 1.0174166870117187, 1.0177935180664062, 1.0169886474609375, 1.0179368896484375, 1.01819189453125, 1.0179256591796875, 1.01707568359375, 1.0180884399414063, 1.0175416259765624, 1.0177484741210938, 1.01865673828125, 1.01788671875, 1.0186629028320313, 1.0175426635742189, 1.0182052001953126, 1.0181621704101562, 1.017913330078125, 1.0183598022460938, 1.0172467041015625, 1.0170460205078125, 1.0173931274414063, 1.0176419677734374, 1.0177720336914062, 1.016953857421875, 1.01732763671875, 1.0172713012695314, 1.01722216796875, 1.0184335327148437, 1.0174474487304688, 1.0173317260742187, 1.017807861328125, 1.0172548828125, 1.0176010131835938, 1.0181068725585938, 1.0175713500976562, 1.0182727661132813, 1.0180116577148437, 1.0184151000976562, 1.0177402954101562, 1.017481201171875, 1.0176061401367187, 1.0172026977539061, 1.016680419921875, 1.0167982177734376, 1.016784912109375, 1.0172897338867188, 1.0168883056640625, 1.01704296875, 1.0168678588867188, 1.0165729370117187, 1.0167131958007813, 1.0166456298828126, 1.0165667724609375, 1.0168145751953126, 1.016875, 1.0166814575195313, 1.0167070922851562, 1.0168309936523436, 1.0168156127929688, 1.01669580078125, 1.01747607421875, 2.11044970703125, 1.0169733276367188, 1.0170706176757813, 1.0172846069335937, 1.0169989013671874, 1.0167142333984376, 1.01673779296875, 1.0168524780273438, 1.0172088623046875, 1.017069580078125, 1.0166159057617188, 1.0165391235351562, 1.0165780639648438, 1.016901611328125, 1.0178887939453125, 1.0169794311523437, 1.0168237915039062, 1.017164794921875, 1.017049072265625, 1.0169600219726562, 1.0166497192382813, 1.0166763305664062, 1.0166876220703125, 1.017296875, 1.0177669067382813, 1.0170214233398438, 1.01680126953125, 1.0170153198242187, 1.0169190673828126, 1.0170368041992188, 1.0171064453125, 1.0172354736328124, 1.0170460205078125, 1.0172548828125, 1.017270263671875, 1.01686474609375, 1.016853515625, 1.01725390625, 1.0170050659179688, 1.0172303466796875, 1.01722314453125, 1.01692724609375, 1.0170449829101562, 1.0169825439453124, 1.0179143676757811, 1.0177525634765625, 1.0174228515625, 1.0177669067382813, 1.0174218139648437, 1.017470947265625, 1.016974365234375, 1.01671728515625, 1.0168442993164062, 1.01711669921875, 1.0169733276367188, 1.017064453125, 1.016784912109375, 1.0175057983398437, 1.0172620849609375, 1.0171259155273438, 1.0172252197265625, 1.0170562744140625, 1.0171351318359374, 2.110795654296875, 1.0166190185546875, 1.0167347412109375, 1.0169712524414063, 1.0166886596679687, 1.016806396484375, 1.0165883178710937, 1.0169190673828126, 1.0171340942382812, 1.0177515258789063, 1.0171422729492188, 1.0173849487304687, 1.0172548828125, 1.0168411865234375, 
1.0169845581054688, 1.0166845703125, 1.0168084716796875, 1.017049072265625, 1.0166876220703125, 1.0172395629882813, 1.016953857421875, 1.016732666015625, 1.0170419311523438, 1.016896484375, 1.016853515625, 1.0166179809570313, 1.0168473510742186, 1.0165565185546874, 1.0171422729492188, 1.0176635131835938, 1.0167357177734375, 1.0171463623046875, 1.017027587890625, 1.0171187133789064, 1.0174617309570313, 1.0175324096679688, 1.01707568359375, 1.017122802734375, 1.0175518798828125, 1.0175703125, 1.0169467163085937, 1.0168923950195312, 1.0171740112304688, 1.0170286254882812, 1.0166773681640624, 1.0171105346679687, 1.0168504028320313, 1.0169476928710937, 1.0173501586914062, 1.0176942138671874, 1.0172774658203125, 1.0176573486328124, 1.01741259765625, 1.0166753540039062, 1.0169313354492187, 1.0168473510742186, 1.016975341796875, 1.0182573852539063, 1.0172119140625, 1.017554931640625, 1.017101318359375, 1.0170040283203126, 1.0173255615234376, 2.110369873046875, 1.0170951538085937, 1.0169763793945312, 1.0166558837890625, 1.017407470703125, 1.0166405029296874, 1.0165350341796875, 1.0173060913085938, 1.0170572509765625, 1.0168780517578124, 1.0168340454101563, 1.0168545532226563, 1.0169609985351562, 1.0170890502929688, 1.0174730224609374, 1.0169528198242188, 1.0169354248046876, 1.0171760864257813, 1.0170286254882812, 1.0169978637695312, 1.0167285766601561, 1.016795166015625, 1.0171883544921876, 1.0171248779296875, 1.0169425659179687, 1.0167398681640625, 1.016647705078125, 1.017091064453125, 1.0167838745117188, 1.0169794311523437, 1.0168053588867187, 1.0165780639648438, 1.0166886596679687, 1.0169876708984376, 1.016974365234375, 1.017037841796875, 1.0167971801757814, 1.0169210815429688, 1.0174884033203124, 1.0167941284179687, 1.0167725830078125, 1.0167675170898438, 1.0176215209960937, 1.0169835815429686, 1.017260009765625, 1.0171054077148438, 1.017228271484375, 1.01694873046875, 1.0170050659179688, 1.017069580078125, 1.017248779296875, 1.01722216796875, 1.0173972778320313, 1.017143310546875, 1.0172692260742187, 1.0171156616210937, 1.0175375366210937, 1.0173552856445311, 1.0170654907226562, 1.0171207885742188, 1.017069580078125, 1.0168463134765624, 1.0169405517578125, 2.110486572265625, 1.0164469604492188, 1.0168002319335938, 1.016369140625, 1.0169415893554687, 1.0168094482421874, 1.016580078125, 1.0167101440429687, 1.0166886596679687, 1.0167756958007812, 1.0168053588867187, 1.0167296142578126, 1.0168402099609375, 1.01680126953125, 1.0167633666992189, 1.0166968383789063, 1.01671630859375, 1.0167347412109375, 1.0167684936523438, 1.01724365234375, 1.016858642578125, 1.01710546875, 1.0170787353515625, 1.0171729736328126, 1.0169989013671874, 1.0165247802734374, 1.0167285766601561, 1.0167684936523438, 1.0166016235351563, 1.0166517944335938, 1.0164653930664063, 1.0165411987304687, 1.0165831909179688, 1.01671630859375, 1.0165924072265624, 1.0165606689453126, 1.016543212890625, 1.0166599731445312, 1.0167716064453125, 1.0168186645507813, 1.01701123046875, 1.0169200439453125, 1.0169108276367187, 1.0166343383789063, 1.016553466796875, 1.0164080810546876, 1.0169200439453125, 1.016774658203125, 1.01694873046875, 1.0167030029296875, 1.016711181640625, 1.017343994140625, 1.0178682861328125, 1.017290771484375, 1.0172057495117188, 1.0169927978515625, 1.016964111328125, 1.0171351318359374, 1.0170265502929687, 1.0169200439453125, 1.0168862915039063, 1.0171627807617187, 1.0168002319335938, 2.112787353515625, 1.0166026000976562, 1.0169682006835938, 1.0167654418945313, 1.0172047119140626, 1.0169948120117187, 
1.0164418334960938, 1.0169876708984376, 1.0169866333007813, 1.0173931274414063, 1.0172119140625, 1.017186279296875, 1.0169815063476562, 1.0177986450195313, 1.0174259033203126, 1.017359375, 1.0174873657226562, 1.0177362060546875, 1.01767578125, 1.017439208984375, 1.0174658813476563, 1.01768701171875, 1.0180730590820313, 1.0177741088867187, 1.0177136840820313, 1.0177843017578125, 1.0180269775390625, 1.0175682373046875, 1.0174586791992188, 1.0174105834960938, 1.01734912109375, 1.0172139282226562, 1.0173184204101562, 1.0173480834960937, 1.0167736206054687, 1.0164807739257813, 1.0166026000976562, 1.0172415771484375, 1.01684326171875, 1.0170951538085937, 1.0171576538085938, 1.0169251708984375, 1.0167306518554688, 1.0167562255859375, 1.0166569213867187, 1.0165821533203125, 1.0164367065429687, 1.0164756469726564, 1.016543212890625, 1.0165913696289062, 1.0170572509765625, 1.0170706176757813, 1.0169989013671874, 1.0169262084960937, 1.0167183227539063, 1.01669580078125, 1.0168842163085938, 1.0169262084960937, 1.016958984375, 1.0170848999023439, 1.0168914184570312, 1.0169169921875, 1.017260009765625, 2.113946533203125, 1.0165718994140625, 1.0166046752929687, 1.016953857421875, 1.0168678588867188, 1.0169241333007812, 1.0169292602539062, 1.0173470458984375, 1.0169763793945312, 1.0174136352539063, 1.0167705688476563, 1.017006103515625, 1.0173532104492187, 1.0174310302734375, 1.0171514892578124, 1.017064453125, 1.0168955078125, 1.0172467041015625, 1.016848388671875, 1.016911865234375, 1.016647705078125, 1.0169364624023438, 1.0165985107421875, 1.0165892944335937, 1.0166067504882812, 1.0164940795898438, 1.0166220703125, 1.0168770751953125, 1.0167285766601561, 1.0169609985351562, 1.0163701782226562, 1.0168135375976564, 1.01677978515625, 1.01676953125, 1.0166210327148437, 1.016521728515625, 1.0165770263671876, 1.0167521362304688, 1.0170449829101562, 1.0166937866210937, 1.0167091064453124, 1.01682177734375, 1.0167613525390624, 1.0167510986328125, 1.0168627319335937, 1.016816650390625, 1.0168186645507813, 1.0168770751953125, 1.0175191040039062, 1.0170839233398437, 1.0171156616210937, 1.0169978637695312, 1.0167551879882812, 1.0168893432617188, 1.0166149291992188, 1.0167460327148437, 1.0174617309570313, 1.0178723754882812, 1.0175508422851562, 1.0171740112304688, 1.0164818115234375, 1.016890380859375, 1.0167900390625, 2.115039306640625, 1.0166619873046876, 1.0170449829101562, 1.0170029907226563, 1.0168207397460938, 1.0171340942382812, 1.0175303955078125, 1.0170664672851562, 1.0168944702148437, 1.0170839233398437, 1.0168811645507811, 1.0166190185546875, 1.0173521728515624, 1.0172764282226563, 1.0172467041015625, 1.0180219116210938, 1.0176358642578125, 1.0167941284179687, 1.0168053588867187, 1.016974365234375, 1.0168657836914063, 1.0167101440429687, 1.0172078247070313, 1.017523193359375, 1.0172088623046875, 1.0169343872070313, 1.0170183715820313, 1.0173358154296874, 1.0169886474609375, 1.0170839233398437, 1.0169886474609375, 1.0171361083984376, 1.0170203857421876, 1.0177321166992188, 1.01720166015625, 1.0167777099609374, 1.0169886474609375, 1.0173583374023438, 1.0168914184570312, 1.0169722900390625, 1.0168985595703126, 1.0167562255859375, 1.016875, 1.0164859008789062, 1.0166527709960937, 1.01667431640625, 1.0164827880859375, 1.0166220703125, 1.0165606689453126, 1.0166558837890625, 1.016774658203125, 1.0169169921875, 1.0172507934570312, 1.0167654418945313, 1.0169661254882814, 1.0170787963867187, 1.0171719970703126, 1.0167449340820311, 1.0168955078125, 1.016932373046875, 1.0169548950195313, 1.0173470458984375, 
1.0174095458984376]",tokens/s,0.968194209602791,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-4B,Qwen/Qwen1.5-4B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,3193.102336,5128.060928,0.0,4481.613824,4276.256768,s,10,3.2413734130859373,0.32413734130859373,0.0016825840816240875,0.3241157684326172,0.32623468627929686,0.3264099456787109,0.32655015319824215,"[0.326585205078125, 0.3253639221191406, 0.3222522888183594, 0.32284228515625, 0.32339013671875, 0.32129388427734373, 0.32326702880859376, 0.3248414001464844, 0.32534152221679685, 0.3261957397460937]",tokens/s,789.7886709580806,kWh,3.8053463575326738e-06,2.0845240635480878e-06,1.689038677618604e-05,2.27802571972668e-05,tokens/kWh,11237801.126789523,MB,3193.102336,5128.060928,0.0,4481.613824,4465.661952,s,10,189.078173828125,18.9078173828125,0.012273201694566093,18.9097021484375,18.920707812499998,18.9233919921875,18.9255393359375,"[18.91315625, 18.920111328125, 18.9024375, 18.901080078125, 18.88138671875, 18.897064453125, 18.91123828125, 18.908166015625, 18.926076171875, 18.91745703125]",tokens/s,3.331955176236681,kWh,0.00022324078258437416,0.00012235523933478362,0.0009709997090908196,0.0013165957310099774,tokens/kWh,47850.67922988926,,s,629,191.68752508544924,0.3047496424251975,0.038502073286161766,0.300015625,0.3008729064941406,0.3014152465820313,0.6232092919921874,"[0.3012270202636719, 0.30114816284179685, 0.3007774658203125, 0.30110003662109375, 0.29941351318359377, 0.29971661376953124, 0.2996531066894531, 0.2997452697753906, 0.30033203125, 0.3001241455078125, 0.30014566040039065, 0.3000565795898438, 0.3007344665527344, 0.30101809692382814, 0.2998548583984375, 0.2999808044433594, 0.3005767822265625, 0.29991937255859374, 0.3002378234863281, 0.3010672607421875, 0.30078155517578126, 0.30025625610351564, 0.29994802856445313, 0.3000330505371094, 0.3002951354980469, 0.30040884399414064, 0.30003506469726565, 0.30066790771484375, 0.30030438232421874, 0.30015179443359374, 0.30028594970703126, 0.3002818603515625, 0.30056243896484375, 0.3007068786621094, 0.3003309326171875, 0.29966232299804685, 0.2997370910644531, 0.2999521179199219, 0.29981594848632814, 0.2995947570800781, 0.29999102783203124, 0.29974835205078126, 0.299620361328125, 0.2994493408203125, 0.2994759826660156, 0.29976165771484375, 0.30030642700195315, 0.3000166320800781, 0.2999060363769531, 0.30026956176757813, 0.2995128173828125, 0.29971661376953124, 0.2999552001953125, 0.3005082092285156, 0.30011181640625, 0.2995394592285156, 0.3000186767578125, 0.30043032836914063, 0.29998388671875, 0.29997259521484376, 0.299610107421875, 0.29887283325195313, 0.6254059448242187, 0.3006320495605469, 0.3013908386230469, 0.30085733032226564, 0.3013273620605469, 0.29998489379882814, 0.2996244506835937, 0.3003135986328125, 0.299826171875, 0.2998814697265625, 0.300400634765625, 0.29994085693359374, 0.3004631042480469, 0.3001395263671875, 0.2999992370605469, 0.299926513671875, 0.300759033203125, 0.300590087890625, 0.29988455200195313, 
0.29991729736328127, 0.30021221923828123, 0.30040472412109376, 0.3002593383789062, 0.30088909912109374, 0.3011358642578125, 0.2998763427734375, 0.30023577880859376, 0.30020709228515624, 0.3008174133300781, 0.3003627624511719, 0.300759033203125, 0.3007027587890625, 0.30042620849609375, 0.30063821411132813, 0.2999624328613281, 0.29996844482421875, 0.30086349487304687, 0.30146969604492185, 0.29992141723632815, 0.3000361022949219, 0.3000586242675781, 0.29992755126953125, 0.2999654541015625, 0.2998486938476562, 0.30020095825195314, 0.3000299377441406, 0.30309478759765623, 0.3002388610839844, 0.2999859313964844, 0.3003576354980469, 0.30067608642578125, 0.3001446533203125, 0.30003302001953125, 0.3000166320800781, 0.2999244689941406, 0.3001692199707031, 0.29990194702148437, 0.3005317077636719, 0.30013031005859375, 0.2998405151367188, 0.29998797607421873, 0.2999336853027344, 0.30006988525390627, 0.622824462890625, 0.30008013916015625, 0.29997259521484376, 0.2997729187011719, 0.29949029541015626, 0.300337158203125, 0.30040985107421875, 0.30018765258789065, 0.2999029846191406, 0.29995724487304687, 0.30025112915039065, 0.30034637451171875, 0.29992959594726565, 0.30029824829101565, 0.3003873291015625, 0.2998978576660156, 0.30032589721679687, 0.3002255249023438, 0.3004590454101562, 0.3001773681640625, 0.2999449462890625, 0.2996777038574219, 0.2997125244140625, 0.3017687072753906, 0.2998691711425781, 0.29983026123046874, 0.2998783874511719, 0.30037503051757813, 0.3002183532714844, 0.29920254516601563, 0.29975244140625, 0.2997073974609375, 0.29997671508789064, 0.2997862548828125, 0.30004327392578123, 0.2995845031738281, 0.29983743286132813, 0.29989376831054687, 0.3005511779785156, 0.3004989318847656, 0.3004620666503906, 0.2998620300292969, 0.2996592712402344, 0.2998609924316406, 0.29997567749023435, 0.2994964599609375, 0.3000780944824219, 0.30188134765625, 0.29991015625, 0.30026956176757813, 0.30005453491210937, 0.29941964721679687, 0.3000770568847656, 0.2994923400878906, 0.29899981689453126, 0.299219970703125, 0.29973809814453123, 0.29967666625976563, 0.30005966186523436, 0.3000115661621094, 0.3005040283203125, 0.2999951477050781, 0.30062490844726564, 0.6233589477539062, 0.29990194702148437, 0.3002234802246094, 0.2998190002441406, 0.30008734130859377, 0.30004833984375, 0.2998200378417969, 0.3000770568847656, 0.300015625, 0.3000606689453125, 0.29941146850585937, 0.29921484375, 0.299578369140625, 0.2996756591796875, 0.2996449279785156, 0.30001971435546876, 0.3000995788574219, 0.29999002075195313, 0.3003607177734375, 0.30082763671875, 0.3000340576171875, 0.300611572265625, 0.30002584838867186, 0.299694091796875, 0.29959066772460935, 0.30004019165039064, 0.2999818115234375, 0.30008218383789065, 0.29999002075195313, 0.3006935119628906, 0.2997913513183594, 0.29910015869140627, 0.2994124755859375, 0.29980160522460936, 0.2999715881347656, 0.3000504455566406, 0.3000074157714844, 0.2999378051757812, 0.2999920654296875, 0.3000719299316406, 0.2999992370605469, 0.30074981689453123, 0.30103448486328127, 0.2998681640625, 0.3038136291503906, 0.3005091857910156, 0.3002746887207031, 0.2998763427734375, 0.299767822265625, 0.29981594848632814, 0.2999541625976562, 0.29982720947265623, 0.299641845703125, 0.29993267822265623, 0.29993472290039064, 0.2999715881347656, 0.2998343811035156, 0.29893017578125, 0.29962136840820314, 0.30002072143554687, 0.2994237365722656, 0.299831298828125, 0.30021630859375, 0.623921142578125, 0.3000299377441406, 0.30005966186523436, 0.299926513671875, 0.299947021484375, 0.300115966796875, 
0.29993572998046875, 0.30002072143554687, 0.30004531860351563, 0.29974118041992187, 0.30062188720703126, 0.29995513916015626, 0.29942477416992186, 0.2993663940429688, 0.29983026123046874, 0.2999603271484375, 0.2999285888671875, 0.3015209045410156, 0.29998797607421873, 0.30004327392578123, 0.30011700439453126, 0.2999449462890625, 0.30022860717773436, 0.3003965759277344, 0.30081021118164064, 0.30038836669921876, 0.30042520141601564, 0.30013336181640626, 0.3006033935546875, 0.29989376831054687, 0.29989273071289063, 0.30014874267578123, 0.3004436340332031, 0.299747314453125, 0.2998476867675781, 0.3000094604492187, 0.29924453735351564, 0.29881036376953124, 0.29879910278320315, 0.2987581481933594, 0.29875506591796874, 0.29884518432617185, 0.298787841796875, 0.2988482666015625, 0.29977191162109373, 0.3000657958984375, 0.2989967346191406, 0.2990592041015625, 0.2991626281738281, 0.2990673828125, 0.29893017578125, 0.2991349792480469, 0.299323486328125, 0.29853277587890625, 0.29891278076171873, 0.29906942749023435, 0.2987386779785156, 0.3028009033203125, 0.29910748291015626, 0.2988552551269531, 0.2989045715332031, 0.299146240234375, 0.29900595092773435, 0.621075439453125, 0.2991349792480469, 0.2998179931640625, 0.29896807861328123, 0.2991811828613281, 0.2986914672851563, 0.29862911987304686, 0.29889239501953124, 0.2989116516113281, 0.2987960205078125, 0.29937152099609377, 0.29994189453125, 0.29974630737304686, 0.30000537109375, 0.300537841796875, 0.29991015625, 0.30004837036132814, 0.300326904296875, 0.3002030029296875, 0.3000575866699219, 0.3000770568847656, 0.3004405822753906, 0.2999531555175781, 0.30005148315429686, 0.3000484008789063, 0.3000626525878906, 0.30036172485351564, 0.3001272277832031, 0.30000640869140627, 0.2996879272460938, 0.29982925415039063, 0.29986611938476565, 0.3000391540527344, 0.2998128662109375, 0.29970330810546875, 0.30041189575195315, 0.3001200561523438, 0.29997772216796875, 0.3035576171875, 0.30046923828125, 0.29996749877929685, 0.3007201232910156, 0.3004538879394531, 0.29997671508789064, 0.30013543701171874, 0.30009344482421874, 0.3003351135253906, 0.3000340576171875, 0.300626953125, 0.29982925415039063, 0.29992959594726565, 0.299978759765625, 0.3000924072265625, 0.30002484130859375, 0.29995724487304687, 0.2998896789550781, 0.299720703125, 0.2996705322265625, 0.2998804626464844, 0.2997698669433594, 0.3005307006835938, 0.29985382080078127, 0.30008731079101564, 0.6239016723632812, 0.3004989318847656, 0.3000094604492187, 0.3003781127929688, 0.3024650268554688, 0.30008941650390625, 0.30021728515625, 0.29997671508789064, 0.3000985717773437, 0.2997452697753906, 0.3001559143066406, 0.299789306640625, 0.299578369140625, 0.3000637512207031, 0.3000022888183594, 0.30041189575195315, 0.300179443359375, 0.30062490844726564, 0.3001978759765625, 0.29980978393554686, 0.30084915161132814, 0.3001692199707031, 0.30007400512695315, 0.3024322204589844, 0.3004334106445313, 0.29990194702148437, 0.30002584838867186, 0.2999787902832031, 0.3001108093261719, 0.3005962219238281, 0.3003105163574219, 0.30027059936523437, 0.299931640625, 0.2999029846191406, 0.3000309753417969, 0.30007601928710936, 0.2999808044433594, 0.299936767578125, 0.3001610107421875, 0.29992755126953125, 0.29989376831054687, 0.29997567749023435, 0.3000391540527344, 0.3000443115234375, 0.3002992248535156, 0.2999715881347656, 0.2998241577148438, 0.2998548278808594, 0.3000862731933594, 0.300653564453125, 0.3015086059570313, 0.29992242431640626, 0.299863037109375, 0.29971966552734375, 0.29996954345703125, 0.2999613342285156, 
0.3001968688964844, 0.2997841796875, 0.2999797668457031, 0.29949029541015626, 0.2995916748046875, 0.29981695556640625, 0.30072625732421876, 0.6255585327148437, 0.30017230224609376, 0.299831298828125, 0.3000555419921875, 0.30022042846679686, 0.30029925537109375, 0.30140109252929687, 0.30094949340820315, 0.2998886413574219, 0.29979647827148437, 0.30036376953125, 0.29990194702148437, 0.30001458740234377, 0.300010498046875, 0.29990707397460936, 0.29992959594726565, 0.3013775329589844, 0.2998958129882813, 0.3009157104492188, 0.3004344177246094, 0.2997698974609375, 0.299400146484375, 0.3000780944824219, 0.300410888671875, 0.3009054870605469, 0.3005409240722656, 0.301384765625, 0.30065350341796876, 0.30062591552734375, 0.30096075439453124, 0.3004375, 0.30160894775390623, 0.3008819274902344, 0.3004241943359375, 0.29964389038085937, 0.2996091003417969, 0.2996643981933594, 0.29964901733398436, 0.2998384704589844, 0.29929779052734373, 0.29952517700195314, 0.30001556396484375, 0.29967666625976563, 0.29992242431640626, 0.2999613342285156, 0.299926513671875, 0.2998753356933594, 0.2995947570800781, 0.2999183349609375, 0.2996172790527344, 0.29993267822265623, 0.30100991821289064, 0.30017843627929686, 0.30001254272460937, 0.29992141723632815, 0.29997055053710936, 0.300147705078125, 0.3002347412109375, 0.29967462158203123, 0.299504638671875, 0.299514892578125, 0.29878680419921877, 0.2995189819335938, 0.6255062866210938, 0.30063821411132813, 0.30005966186523436, 0.30045697021484374, 0.3000965270996094, 0.2999285888671875, 0.300015625, 0.300263427734375, 0.299894775390625, 0.29981695556640625, 0.30063821411132813, 0.300000244140625, 0.29996954345703125, 0.29998797607421873, 0.30009344482421874, 0.2999808044433594, 0.3002378234863281, 0.30191104125976564, 0.3011993713378906, 0.30066586303710935, 0.3013355407714844, 0.30207794189453124, 0.302529541015625, 0.30110720825195314, 0.3007958984375, 0.3010785217285156, 0.30087783813476565, 0.2998056945800781, 0.30067608642578125, 0.3011614685058594, 0.3007979431152344, 0.30112152099609374, 0.3007201232910156, 0.3007068176269531, 0.30179122924804686, 0.3001978759765625, 0.3004272766113281, 0.2999080810546875, 0.3004764099121094, 0.30043954467773437, 0.3001835632324219, 0.3006484375, 0.3004712829589844, 0.3014246826171875, 0.3003248291015625, 0.30087884521484376, 0.3002808227539063, 0.299840576171875, 0.3000575256347656, 0.30002383422851564, 0.2996745910644531, 0.29950567626953123, 0.2997145690917969, 0.29997567749023435, 0.30001458740234377, 0.30037503051757813, 0.30008114624023435, 0.2989066162109375, 0.30013543701171874, 0.2999111633300781, 0.29975347900390625, 0.2997350463867188, 0.30005453491210937, 0.6268078002929688, 0.30009548950195314, 0.29986407470703125, 0.30065869140625, 0.3011307373046875, 0.30074368286132813, 0.3008604125976562, 0.30035250854492185, 0.30087167358398437, 0.29988760375976564, 0.30013644409179685, 0.3006300048828125, 0.3005399169921875, 0.30181991577148437, 0.30090756225585935, 0.29990191650390624, 0.30007601928710936, 0.30002688598632815, 0.2999869384765625, 0.3001077880859375, 0.30047845458984374, 0.29997567749023435, 0.3000227966308594, 0.3018362731933594, 0.30386279296875, 0.30291455078125, 0.303236083984375, 0.3023329162597656, 0.30000436401367186, 0.2989014892578125, 0.2988851318359375, 0.29931622314453127, 0.2989588623046875, 0.29878271484375, 0.2992056274414063, 0.30051431274414064, 0.3006761169433594, 0.29950460815429686, 0.29902847290039064, 0.2990940246582031, 0.29902545166015626, 0.29914212036132815, 0.29988455200195313, 
0.299652099609375, 0.30047232055664064, 0.2996326293945312, 0.30089727783203124, 0.29991729736328127, 0.3000965270996094, 0.30000234985351565, 0.29993365478515627, 0.3000115051269531, 0.2999603881835938, 0.29999508666992186, 0.29998284912109374, 0.2999869384765625, 0.30106521606445313, 0.3007928466796875, 0.300732421875, 0.29975653076171876, 0.29992755126953125, 0.30012313842773436, 0.29981491088867185]",tokens/s,3.281382029005844,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-410m,EleutherAI/pythia-410m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1378.24256,1340.604416,0.0,694.157312,598.498816,s,10,0.5756522903442383,0.05756522903442383,0.0016369727383896268,0.0568012638092041,0.05918429183959961,0.06064101028442383,0.061806385040283206,"[0.06209772872924805, 0.05672822570800781, 0.056688095092773436, 0.05679859161376953, 0.05886057662963867, 0.057347423553466795, 0.05689571380615235, 0.05671388626098633, 0.05680393600463867, 0.05671811294555664]",tokens/s,4447.129009890898,kWh,6.782567046937489e-07,3.716537143845926e-07,1.9805285685486103e-06,3.0304389876269517e-06,tokens/kWh,84476209.89738722,MB,1378.24256,1340.604416,0.0,694.157312,659.031552,s,10,35.66919848632813,3.566919848632813,0.03950000205433062,3.552089111328125,3.624692236328125,3.64104033203125,3.65411880859375,"[3.57450146484375, 3.532454345703125, 3.56766015625, 3.657388427734375, 3.621059326171875, 3.5402509765625, 3.56031982421875, 3.532387939453125, 3.539317626953125, 3.5438583984375]",tokens/s,17.662297633109883,kWh,4.217196008799568e-05,2.3111615147015983e-05,0.00011743306696224983,0.00018271664219726155,tokens/kWh,344796.1786205822,,s,629,36.1255710258484,0.05743334026366993,0.0068485805293125165,0.05611315155029297,0.058175487518310545,0.05834158096313477,0.11305373931884766,"[0.05799008178710938, 0.05708492660522461, 0.05782732772827148, 0.05744332885742188, 0.057527294158935545, 0.05809561538696289, 0.0565401611328125, 0.05911040115356445, 0.05911040115356445, 0.058807296752929686, 0.058791934967041014, 0.05813555145263672, 0.05813248062133789, 0.05811199951171875, 0.058054656982421876, 0.05821952056884765, 0.057041919708251954, 0.056956928253173826, 0.057338878631591796, 0.05677363204956055, 0.05590323257446289, 0.05592268753051758, 0.05590425491333008, 0.05611008071899414, 0.05601484680175781, 0.05587148666381836, 0.055932926177978515, 0.056509441375732425, 0.05619200134277344, 0.056089599609375, 0.05595647811889649, 0.055981056213378906, 0.056079360961914064, 0.05589503860473633, 0.0559554557800293, 0.056065025329589846, 0.0564029426574707, 0.05732863998413086, 0.057398273468017576, 0.05806694412231445, 0.05649203109741211, 0.05601792144775391, 0.05590425491333008, 0.05598822402954102, 0.05598310470581055, 0.05660671997070312, 0.057234432220458986, 0.05611315155029297, 0.05587251281738281, 0.055923713684082034, 0.05598822402954102, 0.056018943786621096, 0.05615820693969727, 0.05591244888305664, 0.05669478225708008, 0.05584281539916992, 
0.05593395233154297, 0.05587148666381836, 0.056016895294189455, 0.056000511169433595, 0.05590016174316406, 0.05584896087646484, 0.11391897583007812, 0.056886272430419924, 0.05697945785522461, 0.05592268753051758, 0.0564951057434082, 0.05716787338256836, 0.056594432830810545, 0.05600153732299805, 0.055695358276367186, 0.05583564758300781, 0.05596057510375976, 0.055913471221923826, 0.05584896087646484, 0.05577318572998047, 0.05587148666381836, 0.056035327911376956, 0.055943168640136716, 0.05580799865722656, 0.05776486587524414, 0.057622528076171874, 0.05631180953979492, 0.055818241119384764, 0.055777278900146485, 0.05587251281738281, 0.0560118408203125, 0.05598918533325195, 0.05593395233154297, 0.05613772964477539, 0.056908798217773435, 0.05586841583251953, 0.055809024810791016, 0.05591961669921875, 0.05592063903808594, 0.05590425491333008, 0.056174591064453126, 0.05596364974975586, 0.05545369720458984, 0.05593804931640625, 0.055806976318359375, 0.05584896087646484, 0.05586431884765625, 0.055890945434570315, 0.055839744567871094, 0.05590630340576172, 0.055741439819335936, 0.05607321548461914, 0.055858177185058595, 0.05598515319824219, 0.05599641418457031, 0.056035327911376956, 0.056120319366455076, 0.05593907165527344, 0.05596364974975586, 0.055932926177978515, 0.055947265625, 0.05584076690673828, 0.05575884628295898, 0.055823360443115234, 0.05576704025268555, 0.05586636734008789, 0.055923713684082034, 0.05593088150024414, 0.0557916145324707, 0.11292671966552735, 0.05593600082397461, 0.056169471740722655, 0.055927806854248044, 0.05587558364868164, 0.05590835189819336, 0.055769088745117185, 0.056676353454589844, 0.05695078277587891, 0.05639987182617188, 0.056005630493164066, 0.05686995315551758, 0.05640902328491211, 0.05631488037109375, 0.05584588623046875, 0.057027584075927736, 0.05787955093383789, 0.05715967941284179, 0.05641113662719727, 0.05674803161621094, 0.057038848876953124, 0.05642956924438477, 0.05662105560302735, 0.05600460815429688, 0.05647359848022461, 0.056395774841308595, 0.05592473602294922, 0.05653299331665039, 0.05728460693359375, 0.056796192169189456, 0.056970207214355466, 0.05783552169799805, 0.056842239379882815, 0.05652377700805664, 0.05740031814575195, 0.056120319366455076, 0.05686272048950195, 0.05625446319580078, 0.05662105560302735, 0.056174591064453126, 0.05706854248046875, 0.056346622467041016, 0.05597798538208008, 0.05667020797729492, 0.05655551910400391, 0.05687705612182617, 0.057256961822509764, 0.05748121643066406, 0.057133056640625, 0.05615923309326172, 0.055962623596191405, 0.05652070236206055, 0.056753150939941405, 0.056771583557128906, 0.056970241546630856, 0.05616844940185547, 0.057388031005859375, 0.05657702255249023, 0.057078784942626956, 0.05769420623779297, 0.05860966491699219, 0.056180736541748044, 0.05587148666381836, 0.11307008361816406, 0.05602406311035156, 0.05604044723510742, 0.056018943786621096, 0.058791934967041014, 0.0583741455078125, 0.05816524887084961, 0.05799321746826172, 0.058156032562255856, 0.058518527984619144, 0.058423297882080075, 0.05813350296020508, 0.058103809356689455, 0.05830451202392578, 0.058180606842041016, 0.058041343688964846, 0.05809254455566406, 0.05821952056884765, 0.05822873687744141, 0.05809151840209961, 0.058120193481445315, 0.05805158233642578, 0.05811609649658203, 0.05830348968505859, 0.058006526947021485, 0.05869670486450195, 0.058369022369384765, 0.058347518920898435, 0.058028030395507815, 0.058055679321289064, 0.058210304260253906, 0.0581058578491211, 0.058054656982421876, 0.058288158416748045, 
0.058100704193115235, 0.05831987380981445, 0.058275840759277345, 0.05816524887084961, 0.05805977630615235, 0.05837823867797851, 0.05834444808959961, 0.05804032135009766, 0.05805158233642578, 0.05809971237182617, 0.058052608489990234, 0.05806489562988281, 0.05809254455566406, 0.05809664154052734, 0.05816729736328125, 0.0582042236328125, 0.05809862518310547, 0.05814169692993164, 0.05818675231933594, 0.057608192443847656, 0.05821440124511719, 0.058124320983886715, 0.058272735595703125, 0.058308609008789064, 0.058169345855712894, 0.058071041107177736, 0.058175487518310545, 0.05823897552490234, 0.0583372802734375, 0.11740262603759766, 0.058298366546630856, 0.0585544319152832, 0.05803107070922851, 0.05821948623657226, 0.05826047897338867, 0.05810793685913086, 0.058076126098632816, 0.0581662712097168, 0.05814988708496094, 0.05825843048095703, 0.05809356689453125, 0.05850931167602539, 0.058336254119873046, 0.058426368713378904, 0.05825331115722656, 0.058418174743652344, 0.0581396484375, 0.05809254455566406, 0.05811097717285156, 0.05806796646118164, 0.05830144119262695, 0.05805977630615235, 0.05824512100219727, 0.058036224365234375, 0.058142719268798826, 0.05810892868041992, 0.05846227264404297, 0.058175487518310545, 0.058110912322998046, 0.05813248062133789, 0.0584089584350586, 0.05830451202392578, 0.0581396484375, 0.058211326599121094, 0.05804851150512695, 0.0581662712097168, 0.05817350387573242, 0.05801055908203125, 0.0581662712097168, 0.05825331115722656, 0.05611929702758789, 0.05594521713256836, 0.055949310302734374, 0.05611008071899414, 0.056019966125488284, 0.055982078552246094, 0.055962623596191405, 0.05608448028564453, 0.05570457458496094, 0.056204288482666016, 0.05575372695922851, 0.055787521362304686, 0.05749350357055664, 0.05674803161621094, 0.055940097808837894, 0.05600460815429688, 0.056019966125488284, 0.05588787078857422, 0.05589913558959961, 0.05583052825927735, 0.055907329559326174, 0.05695283126831055, 0.11374079895019532, 0.055927806854248044, 0.05590323257446289, 0.05608652877807617, 0.05595340728759766, 0.056174591064453126, 0.05607321548461914, 0.05605686569213867, 0.056255455017089846, 0.05570969772338867, 0.05589606475830078, 0.05596063995361328, 0.056186817169189454, 0.05588991928100586, 0.055947265625, 0.055981056213378906, 0.056204288482666016, 0.05590016174316406, 0.055982078552246094, 0.05592575836181641, 0.05591862487792969, 0.055920608520507814, 0.05575475311279297, 0.055944190979003904, 0.05595852661132812, 0.055911422729492184, 0.05676134490966797, 0.05655039978027344, 0.055940097808837894, 0.055934974670410156, 0.05602918243408203, 0.056057857513427733, 0.055940097808837894, 0.05590425491333008, 0.056027137756347656, 0.05611724853515625, 0.055994369506835937, 0.05601484680175781, 0.05608448028564453, 0.055962623596191405, 0.05571177673339844, 0.055808990478515626, 0.05793484878540039, 0.057431041717529295, 0.056551422119140625, 0.05656576156616211, 0.05593600082397461, 0.05665689468383789, 0.05718732833862305, 0.055906368255615235, 0.058727359771728514, 0.05800755310058594, 0.05647257614135742, 0.056513534545898435, 0.05671116638183594, 0.05588172912597656, 0.056030208587646485, 0.05598003387451172, 0.05590528106689453, 0.05593600082397461, 0.05535129547119141, 0.05544755172729492, 0.05648691177368164, 0.11342233276367188, 0.05600358581542969, 0.05600153732299805, 0.05581107330322266, 0.055964672088623046, 0.05609062576293945, 0.05611724853515625, 0.05599846267700195, 0.056147968292236325, 0.05704908752441406, 0.05751091384887695, 0.05676031875610352, 
0.05597798538208008, 0.05692620849609375, 0.05621247863769531, 0.05710335922241211, 0.05665280151367187, 0.055989246368408206, 0.0569989128112793, 0.056965118408203126, 0.056476673126220706, 0.05757952117919922, 0.05608243179321289, 0.057240577697753904, 0.05665280151367187, 0.056497150421142575, 0.057215999603271485, 0.0573573112487793, 0.05771366500854492, 0.056035327911376956, 0.05668044662475586, 0.05652479934692383, 0.056048641204833986, 0.056766464233398435, 0.05603737640380859, 0.05681568145751953, 0.05561439895629883, 0.055951358795166016, 0.05671014404296875, 0.05602099227905273, 0.05684531021118164, 0.05712998580932617, 0.05622579193115235, 0.05718425750732422, 0.05587046432495117, 0.05815193557739258, 0.05669580841064453, 0.056288257598876956, 0.05639168167114258, 0.05692416000366211, 0.05890764617919922, 0.05709619140625, 0.055943168640136716, 0.05594521713256836, 0.05592473602294922, 0.055702529907226565, 0.05582233428955078, 0.05554380798339844, 0.05590425491333008, 0.0567193603515625, 0.05655756759643555, 0.05586636734008789, 0.05595238494873047, 0.11324825286865234, 0.056097793579101565, 0.05586841583251953, 0.0565401611328125, 0.05603737640380859, 0.05586841583251953, 0.05586841583251953, 0.055787582397460934, 0.05582944107055664, 0.05609062576293945, 0.05594524765014648, 0.05589398574829101, 0.05601484680175781, 0.05601587295532227, 0.05612851333618164, 0.056167423248291014, 0.05636508941650391, 0.05600048065185547, 0.05591449737548828, 0.05615820693969727, 0.05590425491333008, 0.056697856903076174, 0.056491008758544924, 0.05597491073608398, 0.0558837776184082, 0.055894016265869144, 0.05606195068359375, 0.056013824462890625, 0.055877632141113284, 0.0558131217956543, 0.0560445442199707, 0.05591756820678711, 0.05575478363037109, 0.05573014450073242, 0.055923713684082034, 0.05588889694213867, 0.05579673767089844, 0.05603123092651367, 0.05595340728759766, 0.05692416000366211, 0.05709721755981445, 0.0572149772644043, 0.05634969711303711, 0.0558919677734375, 0.05590528106689453, 0.0558766098022461, 0.055841793060302736, 0.05595033645629883, 0.05608448028564453, 0.05609267044067383, 0.05588479995727539, 0.05583769607543945, 0.05596160125732422, 0.0559554557800293, 0.05604556655883789, 0.05690572738647461, 0.05594214248657227, 0.055856128692626954, 0.05609267044067383, 0.05597183990478516, 0.0560076789855957, 0.05595340728759766, 0.055993408203125, 0.11313145446777344, 0.05605068969726563, 0.05714432144165039, 0.05607219314575195, 0.055894016265869144, 0.05605990219116211, 0.055856128692626954, 0.055787521362304686, 0.0557916145324707, 0.05595647811889649, 0.05597183990478516, 0.055567359924316405, 0.05594521713256836, 0.05598720169067383, 0.05587251281738281, 0.055973888397216794, 0.05577318572998047, 0.055757823944091796, 0.055972862243652347, 0.05598310470581055, 0.05603839874267578, 0.056036350250244144, 0.05586438369750977, 0.055879615783691404, 0.05582950210571289, 0.056062976837158204, 0.055747615814208985, 0.05714838409423828, 0.05821952056884765, 0.0560711669921875, 0.05586739349365234, 0.05611520004272461, 0.05608857727050781, 0.05595340728759766, 0.0560076789855957, 0.055930912017822264, 0.05865881729125977, 0.05698665618896484, 0.0559431037902832, 0.05590835189819336, 0.05589811325073242, 0.05590323257446289, 0.05572403335571289, 0.05595852661132812, 0.055923713684082034, 0.05552844619750977, 0.05647052764892578, 0.05722623825073242, 0.05685452651977539, 0.05584384155273438, 0.05696409606933594, 0.05613260650634765, 0.05573222351074219, 0.05603123092651367, 
0.05594214248657227, 0.05622579193115235, 0.05601587295532227, 0.055932926177978515, 0.05593907165527344, 0.05829119873046875, 0.05608038330078125, 0.056581119537353515, 0.056043521881103515, 0.11301171112060547, 0.05596160125732422, 0.05600460815429688, 0.05594828796386719, 0.0573306884765625, 0.05730099105834961, 0.056136703491210936, 0.05597183990478516, 0.055757823944091796, 0.05651865768432617, 0.05613772964477539, 0.05588684844970703, 0.05589811325073242, 0.055934974670410156, 0.055951358795166016, 0.05582438278198242, 0.055932926177978515, 0.05587251281738281, 0.05707059097290039, 0.057232383728027345, 0.057132030487060545, 0.056750080108642575, 0.05586227035522461, 0.055964672088623046, 0.05600972747802734, 0.05593088150024414, 0.05628518295288086, 0.05651148986816406, 0.056172542572021485, 0.05634764862060547, 0.055940097808837894, 0.0572149772644043, 0.05617049789428711, 0.05602201461791992, 0.05591654586791992, 0.05619609451293945, 0.0580239372253418, 0.056460289001464846, 0.055949310302734374, 0.056018943786621096, 0.056033279418945314, 0.05574860763549805, 0.0558960952758789, 0.056247264862060546, 0.05595443344116211, 0.05688217544555664, 0.05635276794433594, 0.055982078552246094, 0.055987232208251955, 0.0560035514831543, 0.05590937423706055, 0.05664051055908203, 0.05720883178710937, 0.056853504180908204, 0.056360958099365234, 0.0558919677734375, 0.05595852661132812, 0.05596160125732422, 0.05597491073608398, 0.05603228759765625, 0.0562564811706543, 0.055975936889648435, 0.055949310302734374]",tokens/s,17.41148948344487,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-7b,huggyllama/llama-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1979.4944,5480.382464,0.0,4833.93536,4503.282688,s,10,5.706418334960937,0.5706418334960938,0.0011422166985642673,0.5705734558105469,0.5721191223144532,0.5723321746826172,0.5725026165771484,"[0.5707071533203125, 0.5725452270507813, 0.570284912109375, 0.5698196411132812, 0.5708470458984375, 0.569266357421875, 0.5704397583007812, 0.571673828125, 0.57207177734375, 0.5687626342773437]",tokens/s,448.6176529182773,kWh,6.72348948356546e-06,3.6841831159411714e-06,3.142670724010705e-05,4.183437983961368e-05,tokens/kWh,6119368.829691346,MB,1979.4944,5480.382464,0.0,4833.93536,4688.699392,s,10,334.87464062500004,33.487464062499996,0.006284503345517177,33.486009765625,33.4966609375,33.498351953125,33.499704765625005,"[33.50004296875, 33.49628515625, 33.487125, 33.48791796875, 33.48489453125, 33.48313671875, 33.490703125, 33.48450390625, 33.48125, 33.47878125]",tokens/s,1.8813010111013093,kWh,0.0003953183352579305,0.00021666870724086642,0.0018133419907958965,0.002425329033294693,tokens/kWh,25975.85693946752,,s,629,339.4783874511717,0.5397112678079045,0.0678655066405644,0.531504150390625,0.5319763916015625,0.5321570190429687,1.1021465185546875,"[0.531127197265625, 0.5313873901367188, 0.5311314086914063, 0.5313760986328125, 0.5314212036132813, 0.5316044921875, 0.5316137084960938, 0.5314949340820313, 
0.5322680053710938, 0.531794921875, 0.5314816284179688, 0.5319925537109375, 0.531863525390625, 0.5316638793945313, 0.5320120239257813, 0.5315921630859375, 0.5318819580078125, 0.5321605224609375, 0.53214208984375, 0.531884033203125, 0.5315399780273438, 0.5322833862304688, 0.532052978515625, 0.5314406127929687, 0.5317867431640625, 0.5322066040039063, 0.531673095703125, 0.53195263671875, 0.5317314453125, 0.5317376098632812, 0.5320519409179687, 0.5320724487304688, 0.531541015625, 0.5316557006835938, 0.53180517578125, 0.5318154296875, 0.5315655517578125, 0.5316137084960938, 0.5321820068359375, 0.531557373046875, 0.5312399291992187, 0.5314426879882812, 0.5312901000976562, 0.53134130859375, 0.531072998046875, 0.5317980346679687, 0.5320304565429688, 0.5319249877929687, 0.5319178466796874, 0.5318133544921875, 0.531989501953125, 0.5314345092773437, 0.531373046875, 0.531641357421875, 0.5318492431640625, 0.5319464721679688, 0.5321226196289063, 0.5319137573242188, 0.5322987670898438, 0.531788818359375, 0.53163623046875, 0.5317109985351562, 1.1043450927734375, 0.531673095703125, 0.5321574096679688, 0.5315389404296875, 0.5321922607421875, 0.531525634765625, 0.5317652587890624, 0.531631103515625, 0.5321697387695312, 0.5315164184570312, 0.5316024169921875, 0.5313341674804688, 0.5321021728515625, 0.5321441040039062, 0.5318215942382812, 0.531609619140625, 0.5315983276367188, 0.5313085327148438, 0.5319906005859375, 0.5317283325195312, 0.5320519409179687, 0.5316751098632813, 0.5315983276367188, 0.5317775268554688, 0.5322587890625, 0.5316044921875, 0.5319588012695312, 0.5312645263671875, 0.5314641723632813, 0.5313402709960937, 0.531599365234375, 0.5311528930664062, 0.5314417114257812, 0.5315245361328125, 0.5315297241210938, 0.531078125, 0.53165771484375, 0.5314437255859376, 0.5313720092773437, 0.53186767578125, 0.531873779296875, 0.5317017822265625, 0.53191064453125, 0.5315491943359375, 0.531904541015625, 0.53142529296875, 0.532937744140625, 0.5315389404296875, 0.5317601318359375, 0.5315614624023437, 0.5316034545898437, 0.5312798461914062, 0.5315686645507812, 0.53125634765625, 0.5316055297851563, 0.5312440185546875, 0.5317672729492188, 0.5314345092773437, 0.5318533325195313, 0.5322587890625, 0.5322587890625, 0.5315594482421875, 0.5315952758789062, 1.10245166015625, 0.5316915283203125, 0.5318369140625, 0.531167236328125, 0.5314037475585938, 0.5311907958984375, 0.531399658203125, 0.5313310546875, 0.5314713745117188, 0.5310955810546875, 0.5314068603515625, 0.5314180908203125, 0.5313095703125, 0.5320089721679687, 0.532147216796875, 0.5318717041015625, 0.5314703369140625, 0.5313587036132813, 0.5315277099609375, 0.5315932006835937, 0.5320427856445312, 0.5314682006835938, 0.5316864013671875, 0.5312532348632812, 0.531578857421875, 0.5313218383789062, 0.5315369262695312, 0.5313751220703125, 0.5314324340820312, 0.5314744262695312, 0.53136181640625, 0.5313638305664062, 0.5315297241210938, 0.5316331787109375, 0.5315000610351562, 0.5318041381835937, 0.5314805908203125, 0.5310812377929688, 0.5316290283203124, 0.531251220703125, 0.5314805908203125, 0.5312870483398437, 0.5313802490234375, 0.5314498291015625, 0.531378173828125, 0.5312542724609375, 0.531356689453125, 0.5313423461914063, 0.5315717163085938, 0.5312665405273438, 0.5319905395507812, 0.5315286865234375, 0.5317857055664063, 0.5316792602539062, 0.5319916381835937, 0.5315020141601563, 0.5320150756835937, 0.531610595703125, 0.5316116333007812, 0.531620849609375, 0.5315635375976563, 0.53176318359375, 0.5320653076171875, 1.1017093505859374, 0.5309368286132813, 
0.5314478149414062, 0.531114990234375, 0.5318041381835937, 0.531794921875, 0.5318615112304688, 0.5316116333007812, 0.5316034545898437, 0.5310453491210938, 0.5313955688476563, 0.5313310546875, 0.5316249389648438, 0.5315194702148438, 0.5316045532226562, 0.5311835327148438, 0.5314447631835938, 0.53148876953125, 0.5315830688476563, 0.5314241943359375, 0.5317130126953125, 0.5312276611328125, 0.5314457397460938, 0.5311815795898438, 0.5318450927734375, 0.5317929077148438, 0.5315983276367188, 0.5316392822265625, 0.5314345092773437, 0.5313597412109375, 0.5314928588867187, 0.53110986328125, 0.5317550048828125, 0.531198974609375, 0.5312501831054688, 0.5313074951171874, 0.5314006958007812, 0.5310812377929688, 0.5314263305664062, 0.53121435546875, 0.5314088745117187, 0.5312266235351563, 0.5319813232421875, 0.5324287719726563, 0.53281494140625, 0.532264892578125, 0.5315143432617188, 0.531272705078125, 0.5317017822265625, 0.5313546142578125, 0.531945556640625, 0.5313545532226562, 0.53178369140625, 0.5314877319335938, 0.5314969482421875, 0.5313597412109375, 0.5315460815429688, 0.5314918212890625, 0.5317130126953125, 0.5316566772460938, 0.5317969970703125, 0.5315768432617187, 0.53257421875, 1.1031951904296875, 0.5310873413085937, 0.531431396484375, 0.5310433349609375, 0.5313966064453125, 0.5312911376953126, 0.5316239624023438, 0.5313822631835937, 0.5315317993164063, 0.53124609375, 0.5313659057617187, 0.5311549682617187, 0.5316239624023438, 0.531420166015625, 0.5314857177734374, 0.531357666015625, 0.5316966552734375, 0.5313771362304688, 0.5317969970703125, 0.5319751586914062, 0.531504150390625, 0.5317877807617187, 0.53207958984375, 0.531504150390625, 0.5317980346679687, 0.5316649169921875, 0.5317243041992188, 0.5313710327148438, 0.53178369140625, 0.5310208129882813, 0.5313812255859375, 0.5311692504882812, 0.5312440185546875, 0.530914306640625, 0.531140625, 0.5312010498046875, 0.5316300659179688, 0.5319229736328125, 0.5317314453125, 0.5311651611328125, 0.5311211547851562, 0.531178466796875, 0.5316658935546875, 0.5312122802734375, 0.5314109497070313, 0.5314529418945313, 0.5315552978515625, 0.531398681640625, 0.5312880859375, 0.5313443603515625, 0.53163623046875, 0.5312163696289063, 0.5315215454101563, 0.5311047973632812, 0.5318932495117188, 0.531894287109375, 0.5316249389648438, 0.5334948120117188, 0.5316904907226563, 0.5312880859375, 0.5315880737304688, 0.5312071533203125, 0.5317755126953125, 1.1012259521484375, 0.5312655639648437, 0.531800048828125, 0.5310771484375, 0.5316597900390625, 0.5314877319335938, 0.5313556518554687, 0.531009521484375, 0.5315706787109375, 0.5319342041015624, 0.5316925659179688, 0.5315604248046875, 0.5314334716796875, 0.5310894165039063, 0.531968994140625, 0.53129931640625, 0.5314898071289063, 0.5309706420898438, 0.5316198120117187, 0.5311743774414063, 0.53146826171875, 0.5311262817382812, 0.5313914794921875, 0.5310750732421875, 0.5313494873046875, 0.53096142578125, 0.5314263916015625, 0.5310238037109375, 0.5317867431640625, 0.5314058227539062, 0.531926025390625, 0.5319659423828125, 0.5314160766601562, 0.531178466796875, 0.5314283447265625, 0.5311897583007813, 0.5313648681640625, 0.5312819213867187, 0.53264794921875, 0.531478515625, 0.5314662475585937, 0.5313802490234375, 0.53146728515625, 0.531188720703125, 0.531399658203125, 0.5311815795898438, 0.5315440673828125, 0.5322147827148438, 0.531962890625, 0.5316351928710937, 0.5318799438476562, 0.5312614135742187, 0.5315430297851562, 0.5312973022460937, 0.531641357421875, 0.5314744262695312, 0.5315491943359375, 0.5312296752929687, 
0.531863525390625, 0.5313689575195313, 0.5315000610351562, 0.5313095703125, 0.5315379028320313, 1.1023165283203125, 0.5312081909179688, 0.5320724487304688, 0.5311488037109375, 0.5318819580078125, 0.5312911376953126, 0.5314949340820313, 0.5315706787109375, 0.5317857055664063, 0.5313474731445312, 0.5318563842773437, 0.5311016845703125, 0.5315194702148438, 0.53150927734375, 0.5318553466796875, 0.5311559448242188, 0.5314652099609375, 0.5313792114257813, 0.5314171142578125, 0.5314180908203125, 0.5321830444335938, 0.5318389892578125, 0.5316331787109375, 0.5316177368164062, 0.5313392944335937, 0.5313668823242188, 0.5315061645507813, 0.5312501831054688, 0.5319915771484375, 0.5316126708984374, 0.5317601318359375, 0.5316557006835938, 0.53146826171875, 0.5313116455078125, 0.5315369262695312, 0.5313966064453125, 0.5317078857421875, 0.5314437255859376, 0.5320560913085938, 0.5326489868164063, 0.5319014282226563, 0.531631103515625, 0.5314703369140625, 0.5317478637695312, 0.5316761474609375, 0.5316065063476563, 0.531609619140625, 0.5315194702148438, 0.5318502197265625, 0.5314826049804687, 0.5319669799804687, 0.5315072021484375, 0.5315419921875, 0.5310453491210938, 0.5313034057617188, 0.5311867065429687, 0.5317181396484375, 0.531683349609375, 0.5318041381835937, 0.5316966552734375, 0.5320130615234375, 0.5314692993164063, 0.5316198120117187, 1.103932373046875, 0.5310914306640625, 0.531578857421875, 0.5308538818359375, 0.5313659057617187, 0.5314221801757812, 0.5314703369140625, 0.5309716186523438, 0.5315850830078125, 0.5310443115234375, 0.5311928100585938, 0.53098291015625, 0.5311600341796875, 0.5316239624023438, 0.531800048828125, 0.531646484375, 0.5318225708007812, 0.5314242553710937, 0.5319229736328125, 0.53176318359375, 0.5316587524414063, 0.53146826171875, 0.531483642578125, 0.5312266235351563, 0.5317498779296875, 0.5314273071289063, 0.5314631958007813, 0.5311232299804688, 0.5317816162109374, 0.5313494873046875, 0.5318584594726562, 0.5314795532226563, 0.5324308471679687, 0.5313074951171874, 0.5318523559570313, 0.5310309448242188, 0.5311488037109375, 0.5311488037109375, 0.5314488525390625, 0.530966552734375, 0.5313894653320312, 0.5310975952148438, 0.5314426879882812, 0.5313054809570312, 0.5314180908203125, 0.5313187866210938, 0.5313914794921875, 0.5315809326171875, 0.5312921752929688, 0.5312911376953126, 0.531684326171875, 0.5320560913085938, 0.5315133666992188, 0.5317980346679687, 0.5318328247070313, 0.5314559936523438, 0.5321195678710937, 0.5318225708007812, 0.5315963134765626, 0.5315829467773437, 0.5318184814453125, 0.531431396484375, 0.5319127197265625, 1.103847412109375, 0.531304443359375, 0.5315451049804687, 0.531357666015625, 0.5316792602539062, 0.5319639282226563, 0.5315829467773437, 0.5313710327148438, 0.5315205078125, 0.5312491455078125, 0.5314611206054688, 0.5316341552734375, 0.5313760986328125, 0.5311549682617187, 0.531583984375, 0.5310637817382813, 0.5312645263671875, 0.53108837890625, 0.5316741333007813, 0.531162109375, 0.5312429809570313, 0.5314140014648437, 0.531314697265625, 0.5313054809570312, 0.531968017578125, 0.531599365234375, 0.5316085815429688, 0.5312634887695312, 0.5314406127929687, 0.53096240234375, 0.5313423461914063, 0.5313720092773437, 0.5316188354492187, 0.5309050903320313, 0.5311948852539062, 0.5310873413085937, 0.5315645141601563, 0.5312706298828125, 0.5313065185546875, 0.5313433837890625, 0.53119384765625, 0.5309767456054687, 0.5315348510742187, 0.532041748046875, 0.5317161254882813, 0.5314119873046875, 0.5313494873046875, 0.5311979370117188, 0.5316075439453125, 
0.5313341674804688, 0.5313423461914063, 0.531177490234375, 0.5312553100585937, 0.531019775390625, 0.5315665893554687, 0.5312440185546875, 0.5314283447265625, 0.5314006958007812, 0.5314877319335938, 0.5311262817382812, 0.5316321411132813, 0.5312686157226563, 0.5320028076171875, 1.103824951171875, 0.5310279541015624, 0.53146826171875, 0.5310341186523437, 0.5313740844726562, 0.5310853271484375, 0.5314447631835938, 0.5311447143554687, 0.5314212036132813, 0.531420166015625, 0.5311948852539062, 0.5308630981445313, 0.5313853149414063, 0.5309020385742188, 0.53150830078125, 0.5310576171875, 0.5312839965820313, 0.5317191772460937, 0.5318482055664062, 0.5312532348632812, 0.5314641723632813, 0.5309531860351563, 0.5312553100585937, 0.5309808349609375, 0.5311590576171875, 0.5310494995117188, 0.531863525390625, 0.5317703857421875, 0.5314180908203125, 0.53118359375, 0.5314396362304687, 0.5311129150390625, 0.5312420043945313, 0.5310259399414062, 0.5311918334960938, 0.5312973022460937, 0.5321564331054688, 0.5315194702148438, 0.5316976928710937, 0.5313034057617188, 0.5316218872070313, 0.531430419921875, 0.5316812744140625, 0.5315625, 0.5316239624023438, 0.5310105590820312, 0.5315389404296875, 0.5312665405273438, 0.5317191772460937, 0.5312142944335938, 0.5314641723632813, 0.53167822265625, 0.5313607788085938, 0.5311918334960938, 0.5314918212890625, 0.53161474609375, 0.5318031616210938, 0.5315338134765625, 0.5315020751953125, 0.5313065185546875, 0.531968994140625, 0.5318994140625, 0.5317734375]",tokens/s,1.8528425468336207,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-125m,,cuda,0,42,,,,,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,main,False,False,MB,1273.409536,921.174016,0.0,274.726912,220.646912,s,10,0.326115104675293,0.032611510467529296,0.0012484582231843272,0.03218409538269043,0.032784641265869136,0.034564048767089836,0.035987574768066403,"[0.036343456268310544, 0.03226867294311524, 0.03213779067993164, 0.032141822814941406, 0.03203395080566406, 0.03214742279052735, 0.032220767974853515, 0.03238921737670898, 0.03208832168579102, 0.032343681335449216]",tokens/s,7849.989047728858,kWh,3.834838485734976e-07,2.1012961289151069e-07,8.25654520552228e-07,1.419267982017236e-06,tokens/kWh,180374674.2994524,MB,1273.409536,921.174016,0.0,274.726912,250.722304,s,10,20.085640136718748,2.008564013671875,0.006065788108395492,2.006720520019531,2.0181213745117184,2.0185785705566404,2.018944327392578,"[2.018019775390625, 2.006443115234375, 2.0069979248046876, 2.0103148193359375, 2.0040799560546874, 2.00180322265625, 2.00428125, 2.0133179931640623, 2.0013463134765623, 2.0190357666015624]",tokens/s,31.36569189290069,kWh,2.3731767884623614e-05,1.3005590929520364e-05,4.874369069043466e-05,8.548104950457861e-05,tokens/kWh,737005.457526882,,s,629,20.34066634368894,0.03233810229521298,0.0038819833531807996,0.03179007911682129,0.0323544822692871,0.032856268310546874,0.06427897552490235,"[0.03209625625610352, 0.031882240295410154, 
0.031893503189086916, 0.032045055389404296, 0.03220787048339844, 0.031922176361083986, 0.031958015441894534, 0.03196518325805664, 0.031784959793090824, 0.03223756790161133, 0.031974399566650394, 0.031955968856811526, 0.03202969741821289, 0.03302809524536133, 0.03192835235595703, 0.031831008911132816, 0.03254272079467774, 0.033226783752441404, 0.032270305633544924, 0.032140289306640625, 0.031940607070922854, 0.032451583862304685, 0.03203788757324219, 0.03188531112670898, 0.032231422424316404, 0.031936511993408204, 0.03298099136352539, 0.03213516616821289, 0.03167948722839355, 0.031526912689208986, 0.031693824768066405, 0.03172966384887695, 0.03170918464660644, 0.03180953598022461, 0.03179929542541504, 0.031628288269042966, 0.031508480072021484, 0.03155660820007324, 0.03158937644958496, 0.03198259162902832, 0.03151974487304687, 0.03146342468261719, 0.031752191543579104, 0.03203481674194336, 0.03182796859741211, 0.03228160095214844, 0.03201331329345703, 0.03220377731323242, 0.03181056022644043, 0.031865856170654294, 0.031473663330078124, 0.031682559967041016, 0.031628288269042966, 0.03277414321899414, 0.03190790367126465, 0.0320777587890625, 0.03253350448608398, 0.032473087310791016, 0.032159744262695314, 0.03166924858093262, 0.03159654426574707, 0.031492095947265625, 0.06451507568359376, 0.032, 0.03342745590209961, 0.032282623291015625, 0.03176243209838867, 0.0318525447845459, 0.03180646324157715, 0.03143065643310547, 0.03154841613769531, 0.03168870353698731, 0.031514623641967776, 0.03150643157958984, 0.03214438247680664, 0.033160190582275394, 0.03213312149047851, 0.03156684875488281, 0.031958015441894534, 0.03179110336303711, 0.03184332847595215, 0.03142963218688965, 0.03191705513000488, 0.031768575668334964, 0.03164057540893555, 0.0317573127746582, 0.03179827117919922, 0.031731712341308595, 0.03301375961303711, 0.031920127868652344, 0.03172352027893066, 0.03289395141601562, 0.031927295684814457, 0.03162521553039551, 0.03168358421325684, 0.03179110336303711, 0.031666175842285156, 0.03177369689941406, 0.031678464889526366, 0.03179315185546875, 0.031765504837036135, 0.031699968338012696, 0.03142860794067383, 0.03162419128417969, 0.031615999221801756, 0.03140505599975586, 0.03147776031494141, 0.031458303451538085, 0.03143987274169922, 0.03174399948120117, 0.0317255687713623, 0.03142758369445801, 0.03156991958618164, 0.031783935546875, 0.03183616065979004, 0.0317890567779541, 0.03189248085021973, 0.03191910362243652, 0.031932416915893554, 0.032277503967285154, 0.032382976531982424, 0.0317706241607666, 0.03181260871887207, 0.031749120712280275, 0.03174502372741699, 0.06404198455810547, 0.03170611190795898, 0.031704063415527346, 0.031628288269042966, 0.03138764762878418, 0.031698944091796875, 0.03154022407531738, 0.03150540733337402, 0.03196416091918945, 0.03177881622314453, 0.032020481109619144, 0.03214233779907227, 0.03199692726135254, 0.031780864715576174, 0.03226009750366211, 0.032115711212158206, 0.03199180793762207, 0.03156787109375, 0.031453184127807614, 0.03144908714294434, 0.0314839038848877, 0.03146649551391602, 0.031458303451538085, 0.03172147178649903, 0.031893503189086916, 0.0328007698059082, 0.03156787109375, 0.031731712341308595, 0.031849472045898435, 0.03165388870239258, 0.03178700828552246, 0.03168259239196777, 0.03179311943054199, 0.031783935546875, 0.03212083053588867, 0.03178188705444336, 0.03179724884033203, 0.033549312591552735, 0.03284889602661133, 0.031834112167358396, 0.03212492752075195, 0.03198259162902832, 0.03174297523498535, 0.031731712341308595, 
0.031753215789794925, 0.03182387161254883, 0.031473663330078124, 0.03165388870239258, 0.03157196807861328, 0.031453184127807614, 0.031734783172607424, 0.03165695953369141, 0.03180953598022461, 0.03184537506103516, 0.03207372665405273, 0.032285694122314454, 0.032556095123291016, 0.0321484146118164, 0.0324956169128418, 0.03182592010498047, 0.03148287963867188, 0.03179110336303711, 0.03181056022644043, 0.06443007659912109, 0.03183001518249512, 0.03196928024291992, 0.03182694435119629, 0.03170099258422852, 0.03181056022644043, 0.0315043830871582, 0.03177881622314453, 0.03177574348449707, 0.03191910362243652, 0.0321710090637207, 0.03204403305053711, 0.03181875228881836, 0.031703039169311525, 0.03174604797363281, 0.03175526428222656, 0.031764480590820314, 0.031731712341308595, 0.03183923149108887, 0.03340390396118164, 0.032922622680664065, 0.032161792755126956, 0.03223756790161133, 0.031734783172607424, 0.03291340637207031, 0.032074752807617186, 0.03202150344848633, 0.031716352462768556, 0.03155046463012695, 0.031692800521850584, 0.03226828765869141, 0.03171737670898438, 0.03140505599975586, 0.03134976005554199, 0.03197952079772949, 0.03200307083129883, 0.031696895599365234, 0.03169484710693359, 0.03176755142211914, 0.0321710090637207, 0.03153919982910156, 0.03143987274169922, 0.03146444892883301, 0.03160371208190918, 0.03245568084716797, 0.0314337272644043, 0.03163545608520508, 0.03231846237182617, 0.03222323226928711, 0.031942655563354495, 0.03174399948120117, 0.03162112045288086, 0.03139788818359375, 0.032302078247070314, 0.03218841552734375, 0.03275161743164062, 0.0322529296875, 0.03198361587524414, 0.03178803253173828, 0.031665151596069335, 0.03129241561889649, 0.03186073684692383, 0.031955968856811526, 0.06424063873291015, 0.03157811164855957, 0.03165695953369141, 0.03165081596374512, 0.03146854400634766, 0.031693824768066405, 0.03204095840454101, 0.03246182250976563, 0.03183103942871094, 0.03218636703491211, 0.03204095840454101, 0.031784959793090824, 0.03218431854248047, 0.03177779197692871, 0.031698944091796875, 0.03156684875488281, 0.031731712341308595, 0.03239424133300781, 0.03142758369445801, 0.03172966384887695, 0.03203379058837891, 0.03207167816162109, 0.03187302398681641, 0.033006591796875, 0.03175526428222656, 0.0316180477142334, 0.031959039688110355, 0.031514623641967776, 0.031884288787841795, 0.03161497688293457, 0.031440895080566404, 0.031442943572998046, 0.03174092864990234, 0.03172966384887695, 0.03197747230529785, 0.03176755142211914, 0.03205222320556641, 0.03165695953369141, 0.03194777679443359, 0.03171327972412109, 0.031562751770019534, 0.03168972778320313, 0.031731712341308595, 0.03172966384887695, 0.03186483192443847, 0.032143360137939454, 0.03146240043640137, 0.03147776031494141, 0.03138355255126953, 0.0313753604888916, 0.03180646324157715, 0.0317255687713623, 0.03186380767822266, 0.03160678482055664, 0.032494590759277346, 0.03222528076171875, 0.032045055389404296, 0.03183206367492676, 0.03183206367492676, 0.03180851173400879, 0.03134464073181152, 0.031470592498779294, 0.031731712341308595, 0.06429388427734375, 0.03167436790466309, 0.031923200607299806, 0.03159756851196289, 0.03151667213439941, 0.031647743225097655, 0.031802400588989255, 0.03235427093505859, 0.03198259162902832, 0.03172147178649903, 0.031888383865356446, 0.03179417610168457, 0.031507455825805664, 0.03172761535644531, 0.031579135894775394, 0.031692800521850584, 0.0315156478881836, 0.031440895080566404, 0.03173990440368652, 0.03155356788635254, 0.031624160766601565, 0.03153510475158691, 
0.033051647186279294, 0.03177369689941406, 0.031783935546875, 0.03135897636413574, 0.03179007911682129, 0.031714303970336914, 0.031848447799682614, 0.031476736068725586, 0.03139993667602539, 0.03133440017700195, 0.03155763244628906, 0.03146956825256347, 0.03141024017333984, 0.031344575881958006, 0.03177574348449707, 0.03194675254821777, 0.03219046401977539, 0.03170918464660644, 0.031678464889526366, 0.03172249603271484, 0.03172147178649903, 0.031513599395751955, 0.03161497688293457, 0.03177779197692871, 0.03177574348449707, 0.03137228775024414, 0.03147369575500488, 0.031712223052978515, 0.03170918464660644, 0.03205222320556641, 0.03178803253173828, 0.03221299362182617, 0.03220889663696289, 0.03131391906738281, 0.03146956825256347, 0.03166720008850098, 0.031661056518554685, 0.03207376098632812, 0.03320111846923828, 0.032717823028564456, 0.032519168853759765, 0.06434508514404297, 0.031736831665039066, 0.031752191543579104, 0.031851520538330076, 0.03142963218688965, 0.03157196807861328, 0.032121856689453124, 0.03311513519287109, 0.03222534561157227, 0.03178079986572266, 0.03179827117919922, 0.03183206367492676, 0.03181465530395508, 0.03183206367492676, 0.03177267265319824, 0.03184025573730469, 0.031542272567749025, 0.03150540733337402, 0.03178598403930664, 0.03185663986206055, 0.03176038360595703, 0.03312639999389649, 0.03152179145812988, 0.032102401733398435, 0.03188019180297851, 0.03193036842346191, 0.031324159622192385, 0.031851520538330076, 0.03158016014099121, 0.031389696121215824, 0.03189555168151856, 0.03170099258422852, 0.03173785591125488, 0.031285247802734374, 0.03177068710327149, 0.031356864929199216, 0.03158527946472168, 0.031526912689208986, 0.03160985565185547, 0.03185868835449219, 0.031848447799682614, 0.031437824249267575, 0.0317388801574707, 0.03174297523498535, 0.0319866886138916, 0.03175014305114746, 0.031922176361083986, 0.031817728042602536, 0.03180544090270996, 0.03139072036743164, 0.03181260871887207, 0.03197952079772949, 0.03224166488647461, 0.03209011077880859, 0.03164057540893555, 0.03165184020996094, 0.031438848495483396, 0.03207372665405273, 0.03194675254821777, 0.03155148887634277, 0.03187302398681641, 0.03263488006591797, 0.03184025573730469, 0.06429801940917969, 0.03187299156188965, 0.03203276824951172, 0.032094207763671875, 0.03197849655151367, 0.031958015441894534, 0.031926271438598636, 0.031834112167358396, 0.03189452743530274, 0.03218841552734375, 0.032353279113769534, 0.0318791675567627, 0.03179315185546875, 0.031887359619140625, 0.03185868835449219, 0.03156480026245117, 0.03182796859741211, 0.0322426872253418, 0.032043006896972655, 0.03181670379638672, 0.03299020767211914, 0.032031742095947266, 0.03186892890930176, 0.032304126739501955, 0.03237171173095703, 0.0319866886138916, 0.031454208374023435, 0.031525888442993165, 0.03151155281066895, 0.031492095947265625, 0.03167334365844727, 0.031730688095092774, 0.03897753524780274, 0.032933887481689454, 0.031889408111572266, 0.03211980819702148, 0.031719423294067385, 0.031352832794189454, 0.03140505599975586, 0.031687711715698244, 0.031402975082397463, 0.03169177627563476, 0.031749120712280275, 0.03167948722839355, 0.03165695953369141, 0.0316753921508789, 0.03172659111022949, 0.03139072036743164, 0.03140812873840332, 0.03175526428222656, 0.03156684875488281, 0.03263590240478516, 0.03202252960205078, 0.03183923149108887, 0.0319682559967041, 0.031649791717529296, 0.031784959793090824, 0.03172352027893066, 0.03167129516601563, 0.0314337272644043, 0.03163443183898926, 0.03186483192443847, 0.03146137619018555, 
0.06431948852539063, 0.03137843132019043, 0.03158016014099121, 0.031425535202026365, 0.031645696640014646, 0.031730688095092774, 0.03186486434936524, 0.031797216415405276, 0.0318156795501709, 0.03175628852844238, 0.031699968338012696, 0.031410175323486327, 0.03138355255126953, 0.03278540802001953, 0.03300454330444336, 0.032178176879882815, 0.0318156795501709, 0.031855615615844726, 0.031870975494384765, 0.03283967971801758, 0.0314204158782959, 0.0314081916809082, 0.031652799606323244, 0.03211775970458984, 0.031666175842285156, 0.03154022407531738, 0.031373311996459964, 0.03173785591125488, 0.031546367645263675, 0.0316753921508789, 0.0315043830871582, 0.031529983520507815, 0.031764480590820314, 0.03139481544494629, 0.03133440017700195, 0.03138559913635254, 0.03137843132019043, 0.031735807418823245, 0.031543296813964845, 0.03143065643310547, 0.03159859275817871, 0.031936511993408204, 0.031735807418823245, 0.03193548774719238, 0.031458303451538085, 0.031456256866455076, 0.031716352462768556, 0.0317388801574707, 0.032037952423095706, 0.031414207458496095, 0.0326901741027832, 0.03216793441772461, 0.031585344314575194, 0.03156883239746094, 0.03191398429870605, 0.031631359100341795, 0.031903743743896484, 0.03156076812744141, 0.03184736061096191, 0.03181363105773926, 0.03224371337890625, 0.03225702285766602, 0.031955968856811526, 0.06444134521484375, 0.03154431915283203, 0.031665151596069335, 0.03288780975341797, 0.031886335372924804, 0.03204403305053711, 0.03172659111022949, 0.031509504318237305, 0.03173785591125488, 0.032, 0.032320510864257815, 0.03182387161254883, 0.03230003356933594, 0.03187404823303223, 0.032198688507080075, 0.03247203063964844, 0.03235532760620117, 0.032702465057373044, 0.03311718368530273, 0.03155148887634277, 0.03170201683044434, 0.03225094223022461, 0.03220576095581055, 0.03237580871582031, 0.032230400085449216, 0.03218431854248047, 0.03240447998046875, 0.032449535369873043, 0.03225600051879883, 0.03243622589111328, 0.0319498233795166, 0.03165081596374512, 0.031528959274291994, 0.03161702346801758, 0.0321003532409668, 0.03190784072875977, 0.03203788757324219, 0.032328704833984374, 0.032467967987060545, 0.03184435272216797, 0.031665151596069335, 0.03177369689941406, 0.03174604797363281, 0.03233894348144531, 0.031704063415527346, 0.03172454452514648, 0.032039936065673826, 0.03196211242675781, 0.031926271438598636, 0.0328611831665039, 0.03221200180053711, 0.03217712020874024, 0.03181056022644043, 0.03184332847595215, 0.032064510345458985, 0.03207372665405273, 0.03184339141845703, 0.03191596794128418, 0.031817760467529294, 0.031796192169189455, 0.03153408050537109, 0.032161792755126956, 0.03232460784912109]",tokens/s,30.92327406447812,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,microsoft/phi-1_5,microsoft/phi-1_5,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA 
A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1306.464256,2103.967744,0.0,1457.52064,1272.750592,s,10,1.329427551269531,0.13294275512695314,0.0012200886206429765,0.13265889739990233,0.1341803771972656,0.1350921875,0.1358216357421875,"[0.13600399780273437, 0.13269917297363282, 0.1317310791015625, 0.1318907470703125, 0.131806884765625, 0.1328501434326172, 0.1324745635986328, 0.13397775268554687, 0.13261862182617187, 0.1333745880126953]",tokens/s,1925.6408501202932,kWh,1.562583448681218e-06,8.560805149739424e-07,6.424628677710524e-06,8.843292641365686e-06,tokens/kWh,28948493.55120577,MB,1306.464256,2103.967744,0.0,1457.52064,1369.423872,s,10,78.31643701171875,7.831643701171875,0.021087870794415795,7.826485107421875,7.855575732421875,7.869772485351563,7.881129887695312,"[7.8200595703125, 7.8524208984375, 7.80727783203125, 7.828986328125, 7.81284423828125, 7.83669384765625, 7.82121484375, 7.825626953125, 7.82734326171875, 7.88396923828125]",tokens/s,8.044288326162373,kWh,9.24571096544203e-05,5.067334492555682e-05,0.00037095189544549136,0.0005140823500254685,tokens/kWh,122548.4593993139,,s,629,79.38599323272705,0.12620984615695877,0.015775713114662086,0.12392652893066407,0.12565974731445312,0.1260990447998047,0.2560962493896484,"[0.12357324981689453, 0.12395417785644532, 0.12677222442626954, 0.12635955047607422, 0.124906494140625, 0.12412108612060548, 0.12417945861816407, 0.12488601684570312, 0.12357427215576172, 0.12358553314208984, 0.12521676635742188, 0.12579737854003906, 0.1256099853515625, 0.12379033660888672, 0.12357119750976563, 0.12366028594970703, 0.12364492797851563, 0.12450201416015624, 0.12360806274414063, 0.12366643524169922, 0.12377497863769531, 0.12358348846435546, 0.12363878631591797, 0.12448051452636719, 0.12341043090820313, 0.12406886291503906, 0.1237401580810547, 0.12359986877441406, 0.12348108673095703, 0.12367359924316407, 0.12363571166992188, 0.12357324981689453, 0.12363468933105469, 0.12353740692138672, 0.12361625671386718, 0.12422758483886719, 0.12424396514892579, 0.12538982391357423, 0.12366336059570313, 0.12492594909667969, 0.12365926361083984, 0.12361727905273437, 0.12350054168701172, 0.12435148620605468, 0.12661043548583983, 0.12594380950927733, 0.12404121398925781, 0.12383539581298827, 0.12378726196289062, 0.12356095886230468, 0.12356813049316406, 0.12374221038818359, 0.12462796783447265, 0.12367359924316407, 0.12347289276123047, 0.12355174255371094, 0.1236684799194336, 0.12365004730224609, 0.12358656311035156, 0.12423372650146484, 0.12397772979736328, 0.12430950164794922, 0.25706393432617186, 0.12518399810791014, 0.1238814697265625, 0.12411289978027344, 0.12383539581298827, 0.12371456146240234, 0.12595814514160156, 0.12542259216308593, 0.12580659484863282, 0.12499353790283203, 0.1239152603149414, 0.12502528381347655, 0.12599295806884767, 0.12452146911621094, 0.12406169891357421, 0.12400128173828125, 0.12455014038085938, 0.12355481719970703, 0.12359884643554687, 0.12353740692138672, 0.1237760009765625, 0.1253580780029297, 0.12455423736572266, 0.1236305923461914, 0.12355788421630859, 0.12357529449462891, 0.12357529449462891, 0.123683837890625, 0.12378419494628906, 0.12360908508300782, 0.12378828430175781, 0.12359474945068359, 0.12440064239501954, 0.12405043029785157, 0.12471295928955078, 0.12421119689941407, 0.12409037017822265, 0.12466687774658203, 0.12391321563720703, 0.1252689895629883, 0.1255536651611328, 0.12441907501220703, 0.12589260864257812, 0.12565299224853516, 0.12552909088134764, 0.12456448364257812, 0.12414975738525391, 
0.12398387145996094, 0.1251430435180664, 0.12559257507324217, 0.1256447982788086, 0.12538572692871094, 0.12506521606445312, 0.12563251495361327, 0.12649676513671876, 0.12583424377441407, 0.1255536651611328, 0.12570214080810546, 0.12467916870117188, 0.12509900665283202, 0.12491264343261718, 0.12573184204101562, 0.12440268707275391, 0.2558197784423828, 0.12364800262451171, 0.123683837890625, 0.12350975799560547, 0.12375552368164063, 0.12458700561523438, 0.12379647827148438, 0.12382003021240234, 0.12372787475585938, 0.12350975799560547, 0.12364492797851563, 0.12362751770019531, 0.12420403289794922, 0.12373401641845704, 0.12366745758056641, 0.12346367645263671, 0.1236490249633789, 0.12343807983398437, 0.12373401641845704, 0.12360601806640625, 0.1237391357421875, 0.12377907562255859, 0.12418252563476563, 0.12495769500732422, 0.12483993530273438, 0.12376576232910157, 0.12357529449462891, 0.12388044738769531, 0.12398592376708985, 0.12371250915527343, 0.12381696319580078, 0.1249966049194336, 0.12406578826904296, 0.12373709106445313, 0.1236316146850586, 0.12356710052490234, 0.1236305923461914, 0.12532736206054687, 0.12533248138427736, 0.1239900131225586, 0.12350566101074219, 0.12367155456542969, 0.12361727905273437, 0.12359065246582031, 0.12348108673095703, 0.12358451080322265, 0.12368691253662109, 0.12367565155029298, 0.12437299346923827, 0.12555570983886719, 0.12554956817626953, 0.12386406707763672, 0.12385485076904297, 0.12392857360839844, 0.12358246612548827, 0.12377907562255859, 0.12381081390380859, 0.12436070251464844, 0.12358144378662109, 0.12363775634765625, 0.12359168243408203, 0.12367974090576171, 0.12394802856445312, 0.2562037658691406, 0.12365312194824218, 0.1237053451538086, 0.1237760009765625, 0.12487884521484376, 0.12573286437988282, 0.12605644989013673, 0.12572876739501954, 0.12546969604492186, 0.1246740493774414, 0.124980224609375, 0.12507443237304688, 0.12493721771240235, 0.12486860656738281, 0.1260953598022461, 0.12375142669677734, 0.12360294342041016, 0.12364800262451171, 0.12349849700927734, 0.12341350555419922, 0.12356915283203125, 0.12356813049316406, 0.12392652893066407, 0.12379750061035157, 0.12378316497802734, 0.12362239837646484, 0.1237544937133789, 0.12356301116943359, 0.12365824127197265, 0.12351487731933594, 0.12436070251464844, 0.12455014038085938, 0.12394802856445312, 0.12437811279296875, 0.12415590667724609, 0.12446412658691407, 0.1244395523071289, 0.12447334289550781, 0.1255577621459961, 0.1261445083618164, 0.12384767913818359, 0.12500070190429688, 0.12377907562255859, 0.12363775634765625, 0.12484812927246093, 0.12531302642822265, 0.12550348663330077, 0.12423168182373047, 0.12353024291992187, 0.12354150390625, 0.12352819061279297, 0.12369817352294922, 0.12365004730224609, 0.12370738983154297, 0.12456755065917968, 0.12433100891113281, 0.1237739486694336, 0.12359168243408203, 0.12380569458007812, 0.12372684478759766, 0.12373299407958985, 0.12365721893310547, 0.12492594909667969, 0.2555535430908203, 0.12613938903808594, 0.12532940673828125, 0.12536217498779298, 0.1253396453857422, 0.12545536041259767, 0.12536831665039064, 0.12381081390380859, 0.12342476654052735, 0.12351283264160157, 0.12347801971435547, 0.12362035369873046, 0.12350669097900391, 0.12365414428710937, 0.12357324981689453, 0.12354252624511719, 0.12360192108154297, 0.12350975799560547, 0.12355379486083984, 0.12362751770019531, 0.12359065246582031, 0.12350259399414062, 0.12365414428710937, 0.12462079620361328, 0.12366643524169922, 0.12346572875976562, 0.12355891418457031, 0.12368691253662109, 
0.12421836853027343, 0.12335001373291016, 0.12413645172119141, 0.12520652770996094, 0.12418355560302734, 0.1253570556640625, 0.12440064239501954, 0.1235230712890625, 0.1235968017578125, 0.12357529449462891, 0.12391321563720703, 0.12415590667724609, 0.12387635040283203, 0.12383539581298827, 0.12365106964111328, 0.12362649536132812, 0.12361727905273437, 0.12473446655273437, 0.12511949157714844, 0.12443341064453126, 0.12362854766845703, 0.12501708984375, 0.12398387145996094, 0.12350873565673828, 0.12383948516845703, 0.1234698257446289, 0.12409651184082031, 0.12359884643554687, 0.12379750061035157, 0.12368077087402343, 0.12469760131835937, 0.12350669097900391, 0.1235077133178711, 0.12355891418457031, 0.12358451080322265, 0.25665740966796874, 0.12402899169921874, 0.12411385345458985, 0.12439552307128907, 0.12551270294189454, 0.12451840209960938, 0.12435968017578125, 0.12583116912841796, 0.1254318084716797, 0.1253939208984375, 0.12529971313476562, 0.1254133758544922, 0.1254062042236328, 0.1256294403076172, 0.12560486602783202, 0.12544409942626952, 0.12538470458984374, 0.12542668914794922, 0.1252710418701172, 0.12386508941650391, 0.12361420440673829, 0.12358144378662109, 0.12357324981689453, 0.12350873565673828, 0.12352819061279297, 0.12368793487548828, 0.12353945922851563, 0.12357222747802735, 0.12369203186035156, 0.12346880340576172, 0.12355583953857421, 0.1235599365234375, 0.12362035369873046, 0.12436275482177735, 0.1252147216796875, 0.12532940673828125, 0.12448358154296875, 0.1234882583618164, 0.12387020874023437, 0.12536627197265626, 0.12422758483886719, 0.12369407653808594, 0.12347187042236328, 0.12357427215576172, 0.12363673400878906, 0.12348006439208985, 0.12376166534423828, 0.12611788940429688, 0.12559974670410157, 0.12563353729248047, 0.12546969604492186, 0.12542668914794922, 0.1254840316772461, 0.12547277069091797, 0.12400947570800781, 0.12405452728271485, 0.12358451080322265, 0.1235599365234375, 0.12348928070068359, 0.12358246612548827, 0.1234862060546875, 0.12351181030273438, 0.12364492797851563, 0.2565027770996094, 0.12444467163085937, 0.124115966796875, 0.12360499572753907, 0.12404326629638672, 0.12441804504394531, 0.1250918426513672, 0.1257748489379883, 0.12563353729248047, 0.12372889709472656, 0.12562022399902345, 0.12486041259765625, 0.12480921936035157, 0.123720703125, 0.12351897430419922, 0.12377088165283204, 0.12374221038818359, 0.12360908508300782, 0.1239920654296875, 0.1237022705078125, 0.12362547302246094, 0.12358758544921875, 0.12353330993652344, 0.12495155334472656, 0.12379033660888672, 0.1237227554321289, 0.12362137603759765, 0.1235445785522461, 0.12405248260498047, 0.12381081390380859, 0.12356403350830078, 0.12472217559814452, 0.12582911682128906, 0.1260021743774414, 0.12565503692626953, 0.12548607635498046, 0.125338623046875, 0.12534374237060547, 0.12404428863525391, 0.12439449310302735, 0.12376576232910157, 0.12392550659179688, 0.12379135894775391, 0.1235384292602539, 0.12375961303710938, 0.12353228759765625, 0.12357427215576172, 0.12382310485839844, 0.1239767074584961, 0.12351385498046875, 0.12377088165283204, 0.12349235534667968, 0.12367667388916016, 0.1235568618774414, 0.12351283264160157, 0.12387225341796874, 0.12435968017578125, 0.12366745758056641, 0.12357734680175782, 0.12359986877441406, 0.12354560089111329, 0.12352819061279297, 0.12502630615234375, 0.26002227783203125, 0.12474163055419922, 0.12465561676025391, 0.12385485076904297, 0.12388966369628907, 0.12364697265625, 0.12477133178710938, 0.12527410888671875, 0.12405145263671875, 0.1237760009765625, 
0.12372991943359375, 0.1253529586791992, 0.12522803497314453, 0.12430233764648438, 0.12380057525634766, 0.12450713348388671, 0.12450508880615234, 0.12496998596191407, 0.12366950225830078, 0.12387840270996094, 0.12420403289794922, 0.1244395523071289, 0.12422451019287109, 0.12519014739990234, 0.12686438751220702, 0.12572672271728516, 0.12376882934570313, 0.12486656188964844, 0.1239767074584961, 0.12363263702392578, 0.12419276428222656, 0.12381183624267578, 0.12427263641357422, 0.12527206420898437, 0.12414259338378907, 0.12360499572753907, 0.12352102661132812, 0.12354662322998047, 0.12364083099365235, 0.12379545593261719, 0.1257748489379883, 0.12477439880371094, 0.12361011505126954, 0.12374425506591796, 0.12376268768310547, 0.12368691253662109, 0.12373606109619141, 0.12365824127197265, 0.1240596466064453, 0.12369305419921875, 0.12396441650390624, 0.12373811340332032, 0.12371353912353515, 0.12357119750976563, 0.12355379486083984, 0.1238067169189453, 0.12372889709472656, 0.12425318145751953, 0.12379853057861329, 0.12396031951904297, 0.12386099243164063, 0.12410060882568359, 0.12376780700683594, 0.2569492492675781, 0.12346163177490234, 0.12355788421630859, 0.12358451080322265, 0.12365516662597656, 0.12385894775390625, 0.12412210845947266, 0.12367871856689452, 0.12457984161376953, 0.12387123107910156, 0.12360704040527344, 0.12351078033447266, 0.12353638458251953, 0.12353638458251953, 0.12359782409667969, 0.12352921295166015, 0.12678758239746094, 0.1258260498046875, 0.12560076904296874, 0.12596224212646484, 0.12543590545654296, 0.12542668914794922, 0.12532838439941407, 0.12374221038818359, 0.12540006256103517, 0.12414771270751954, 0.12379750061035157, 0.12367871856689452, 0.12370944213867187, 0.12368077087402343, 0.12369817352294922, 0.12355379486083984, 0.12357119750976563, 0.12395622253417969, 0.12402175903320313, 0.12356505584716797, 0.12348210906982422, 0.12368793487548828, 0.12353536224365234, 0.12395622253417969, 0.12403302764892578, 0.12700466918945313, 0.12573798370361328, 0.12478463745117188, 0.12397158050537109, 0.12360192108154297, 0.12377292633056641, 0.12353024291992187, 0.12364390563964844, 0.12410163116455078, 0.12373197174072266, 0.123936767578125, 0.12391731262207031, 0.12367155456542969, 0.12363980865478516, 0.12377804565429687, 0.12557823944091798, 0.12434022521972657, 0.12393472290039062, 0.12455423736572266, 0.12541849517822265, 0.12554239654541016, 0.12558438110351564, 0.25772952270507815, 0.12436172485351563, 0.12393984222412109, 0.12354867553710938, 0.12373197174072266, 0.12357119750976563, 0.123578369140625, 0.12372889709472656, 0.12355379486083984, 0.12389990234375, 0.12362239837646484, 0.12408729553222657, 0.12435148620605468, 0.1244395523071289, 0.12433100891113281, 0.12407398223876953, 0.12391117095947266, 0.12426445007324219, 0.12396236419677735, 0.12403404998779297, 0.1263288345336914, 0.12405554962158204, 0.12396646118164062, 0.1240995864868164, 0.12518297576904297, 0.12740505981445313, 0.12441292572021484, 0.12416512298583984, 0.12624896240234376, 0.12561100769042968, 0.12505804443359375, 0.12423577880859375, 0.12659302520751953, 0.1257891845703125, 0.1247457275390625, 0.12536627197265626, 0.12574105834960939, 0.12544102478027344, 0.12642201232910155, 0.1264035873413086, 0.12703129577636718, 0.12545126342773438, 0.12710707092285156, 0.12490751647949219, 0.1260052490234375, 0.12597964477539061, 0.1264691162109375, 0.12679987335205078, 0.12610150146484375, 0.12555980682373047, 0.12536831665039064, 0.12553011322021485, 0.12543283081054687, 0.125591552734375, 
0.12561817932128908, 0.12541849517822265, 0.1260789794921875, 0.12718182373046874, 0.1256980514526367, 0.12580147552490234, 0.1268490219116211, 0.1256785888671875, 0.12574310302734376]",tokens/s,7.92331209053505,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-14B,Qwen/Qwen-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-65b,huggyllama/llama-65b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 86.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-7b,google/gemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc366-16ba296e0d2b41e323bb140d;8f155bb5-b62d-422c-9184-1e3331566734) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/gemma-7b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-7b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,i,i,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/i/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a2f4b-685a032c036ebfa270af61b8;73cbaf08-7caa-467e-9a6d-3c133770c120) Repository Not Found for url: https://huggingface.co/i/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: i is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-6.7b,facebook/opt-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2034.978816,5539.10272,0.0,4892.655616,4542.610432,s,10,5.622600402832031,0.5622600402832031,0.0013230028627895694,0.5620163879394531,0.5633294921875001,0.564493603515625,0.565424892578125,"[0.56565771484375, 0.5620866088867188, 0.5614910888671875, 0.5609406127929687, 0.5618070678710938, 0.5607069091796875, 0.5619461669921875, 0.5621692504882813, 0.5627241821289063, 
0.56307080078125]",tokens/s,455.30534211724546,kWh,6.621943920114895e-06,3.6285156736691226e-06,3.075088879823785e-05,4.100134839202187e-05,tokens/kWh,6243697.098747441,MB,2035.28192,5539.10272,0.0,4892.655616,4726.279168,s,10,331.3280078125,33.13280078125,0.008409894982587748,33.12982421875,33.142355468750004,33.145203125,33.14748125,"[33.14805078125, 33.124625, 33.1231640625, 33.1260859375, 33.14172265625, 33.1400703125, 33.12555859375, 33.12609375, 33.13908203125, 33.1335546875]",tokens/s,1.9014390125344305,kWh,0.00039121345305516403,0.00021441890828907464,0.0018001254370127661,0.0024057577983570048,tokens/kWh,26187.174803309546,,s,629,335.85907128906234,0.5339571880589229,0.06674281532897068,0.5258792724609375,0.5263243408203125,0.5265016845703125,1.0872363134765626,"[0.5258577880859375, 0.5256417236328125, 0.5264219970703125, 0.5263196411132812, 0.5257113647460937, 0.5258464965820312, 0.525486083984375, 0.5262151489257813, 0.5262940063476562, 0.52614453125, 0.5258035278320312, 0.52607080078125, 0.5257379760742188, 0.5260421142578126, 0.5259489135742188, 0.526060546875, 0.5258331909179688, 0.5260062866210937, 0.5258424072265625, 0.52600732421875, 0.5258045654296875, 0.5261404418945312, 0.5261957397460938, 0.5260748901367187, 0.5259468994140625, 0.5261731567382812, 0.5258363037109375, 0.5262284545898438, 0.5258250122070313, 0.5260421142578126, 0.526023681640625, 0.526244873046875, 0.52615576171875, 0.5261332397460937, 0.52611376953125, 0.5259898681640625, 0.526482421875, 0.526271484375, 0.5264271240234375, 0.5263646850585938, 0.5265366821289063, 0.52632373046875, 0.5263370361328125, 0.5259735107421875, 0.5261414184570312, 0.526107666015625, 0.5262571411132813, 0.5263308715820313, 0.5263267822265625, 0.5261199340820313, 0.52626025390625, 0.5261434936523437, 0.526482421875, 0.5264199829101562, 0.5262622680664063, 0.5264076538085938, 0.5264937133789063, 0.5264998168945313, 0.5264230346679688, 0.5268643798828125, 0.5268623657226562, 0.526497802734375, 1.0878065185546875, 0.5256908569335937, 0.5257564086914063, 0.5259059448242187, 0.5258055419921875, 0.5255567626953125, 0.52566015625, 0.5258875122070312, 0.526002197265625, 0.5259949951171875, 0.5256663208007812, 0.5261107177734375, 0.5259857788085938, 0.5258189086914062, 0.525576171875, 0.525970458984375, 0.5256294555664063, 0.5262294921875, 0.5257728271484375, 0.5257666625976563, 0.525465576171875, 0.5258946533203125, 0.5259468994140625, 0.5260144653320312, 0.5255526123046875, 0.5257932739257812, 0.5255004272460938, 0.5259530029296875, 0.52562841796875, 0.5257584838867188, 0.5257738037109375, 0.525844482421875, 0.5255690307617188, 0.5256724243164063, 0.5258956909179687, 0.5257461547851563, 0.5255751953125, 0.5257482299804688, 0.5255536499023438, 0.5257677001953125, 0.5256314697265625, 0.5257533569335937, 0.5254666137695313, 0.52566015625, 0.5253980102539062, 0.5257482299804688, 0.5259049072265625, 0.5260809936523437, 0.5258383178710937, 0.5262018432617187, 0.5256294555664063, 0.525750244140625, 0.525849609375, 0.5261281127929688, 0.5256796264648438, 0.5259478759765625, 0.5256181640625, 0.5258219604492187, 0.5256539916992188, 0.525886474609375, 0.5259243774414063, 0.5260851440429688, 0.5258331909179688, 1.087382568359375, 0.526060546875, 0.5257789306640624, 0.5258936157226562, 0.5258137817382813, 0.5256263427734374, 0.525675537109375, 0.5257164916992187, 0.525739013671875, 0.525718505859375, 0.5260646362304687, 0.5256406860351562, 0.5256908569335937, 0.5256744995117187, 0.525433837890625, 0.525717529296875, 0.5254287109375, 
0.5256908569335937, 0.5254031372070312, 0.5255137329101562, 0.5256263427734374, 0.525770751953125, 0.5254891357421875, 0.5259683837890625, 0.5255454711914063, 0.5256990966796875, 0.5255372924804688, 0.5261035766601563, 0.5256632080078125, 0.5258690795898437, 0.5257625732421874, 0.525728759765625, 0.5260155029296875, 0.5258189086914062, 0.5256539916992188, 0.5256417236328125, 0.5256273803710938, 0.5258055419921875, 0.5257083129882812, 0.525971435546875, 0.5258916015625, 0.52600830078125, 0.5256673583984375, 0.5257686767578125, 0.5254942626953125, 0.52560791015625, 0.5257482299804688, 0.5258803100585937, 0.5259755249023438, 0.5260676879882813, 0.5256151123046875, 0.5258536987304687, 0.5258526611328125, 0.5258741455078125, 0.5255966796875, 0.5259386596679687, 0.5257666625976563, 0.5259059448242187, 0.5259223022460937, 0.5260523681640625, 0.52583935546875, 0.5260933227539063, 0.5256048583984375, 1.0868602294921874, 0.5259120483398437, 0.5255465087890625, 0.5258884887695312, 0.5255741577148437, 0.525613037109375, 0.5256007690429687, 0.5258076171875, 0.5256734619140625, 0.5257328491210937, 0.5254573974609374, 0.525791259765625, 0.525454345703125, 0.5258527221679687, 0.5255577392578125, 0.525912109375, 0.5255772094726563, 0.525727783203125, 0.5255833129882812, 0.5263431396484375, 0.5257984008789063, 0.525792236328125, 0.525896728515625, 0.52569189453125, 0.5255977172851563, 0.52600732421875, 0.525549560546875, 0.5257738037109375, 0.5256058959960938, 0.5258147583007813, 0.52556494140625, 0.5260534057617188, 0.5259837646484375, 0.5260646362304687, 0.5258803100585937, 0.5260369873046875, 0.5255034790039063, 0.5258485717773438, 0.5255608520507813, 0.52600830078125, 0.52562841796875, 0.52608203125, 0.5257349243164062, 0.5257574462890625, 0.5256406860351562, 0.5257717895507813, 0.5257625732421874, 0.5258875122070312, 0.525686767578125, 0.5260492553710937, 0.5258639526367187, 0.5261209716796875, 0.5259990844726562, 0.5260728149414062, 0.5258884887695312, 0.5258946533203125, 0.52564990234375, 0.5258731689453126, 0.5256837158203125, 0.5259202270507812, 0.5259827270507812, 0.5264097290039063, 0.526213134765625, 1.0886195068359374, 0.5266964721679688, 0.5265223388671875, 0.52691455078125, 0.5266104125976563, 0.5267548217773438, 0.5260175170898438, 0.5263544311523437, 0.5261270751953125, 0.5263964233398437, 0.5262694702148437, 0.5260482788085937, 0.5259909057617187, 0.5260492553710937, 0.5259929809570313, 0.5264066772460938, 0.525787109375, 0.5257963256835938, 0.5255126953125, 0.5257431030273437, 0.5255669555664062, 0.5256345825195312, 0.5254779052734375, 0.5258198852539062, 0.5258219604492187, 0.5260687255859375, 0.5260800170898438, 0.525802490234375, 0.5258506469726563, 0.525781005859375, 0.5256857299804687, 0.5258485717773438, 0.52577587890625, 0.5258373413085937, 0.5261895751953125, 0.52682958984375, 0.526614501953125, 0.52650390625, 0.5255485229492187, 0.5258424072265625, 0.525549560546875, 0.5258352661132812, 0.5255639038085937, 0.525822998046875, 0.5257195434570312, 0.525739013671875, 0.5257267456054687, 0.5263533935546875, 0.5261547241210938, 0.5263810424804688, 0.5262264404296875, 0.526551025390625, 0.5261895751953125, 0.5266165771484375, 0.5261998291015625, 0.5261404418945312, 0.5261486206054687, 0.5264189453125, 0.52577587890625, 0.5259304809570312, 0.5256898803710938, 0.5258782958984375, 0.5257636108398438, 1.08657666015625, 0.5256980590820313, 0.52586083984375, 0.5259304809570312, 0.5256939697265625, 0.5258782958984375, 0.5258782958984375, 0.52608203125, 0.5260462036132812, 0.5260513305664063, 
0.525549560546875, 0.5258875122070312, 0.5257984008789063, 0.5262018432617187, 0.5257328491210937, 0.5260298461914062, 0.5255608520507813, 0.5259683837890625, 0.52613427734375, 0.52609228515625, 0.5256028442382813, 0.52607080078125, 0.5256304931640625, 0.5257891845703125, 0.525643798828125, 0.5260062866210937, 0.5256611938476563, 0.5261844482421875, 0.5262510375976562, 0.5261025390625, 0.5262673950195312, 0.5262622680664063, 0.5260779418945313, 0.5261721801757813, 0.5261353149414062, 0.5261588745117187, 0.5259642944335937, 0.52661865234375, 0.52625, 0.5263790283203125, 0.5262643432617188, 0.5265131225585937, 0.5259069213867188, 0.526286865234375, 0.5260626220703125, 0.5262888793945313, 0.52619775390625, 0.526224365234375, 0.5260155029296875, 0.526613525390625, 0.526581787109375, 0.5259366455078125, 0.5256642456054688, 0.5257686767578125, 0.5256632080078125, 0.5258198852539062, 0.5258588256835938, 0.5261209716796875, 0.525928466796875, 0.5265029296875, 0.5259878540039062, 0.5262069702148438, 0.5264834594726563, 1.088301025390625, 0.5262653198242188, 0.5256724243164063, 0.5255659790039062, 0.525475830078125, 0.5257636108398438, 0.5255300903320312, 0.5257246704101562, 0.5256058959960938, 0.5256345825195312, 0.5254676513671875, 0.5258137817382813, 0.5256581420898437, 0.5258731689453126, 0.526055419921875, 0.526497802734375, 0.5257297973632813, 0.5260103759765625, 0.5256406860351562, 0.5259428100585938, 0.5259376831054687, 0.5261486206054687, 0.5255321655273437, 0.5259735107421875, 0.5258168334960938, 0.52605029296875, 0.5256571044921875, 0.5257420654296875, 0.5257471923828125, 0.525717529296875, 0.525749267578125, 0.5258168334960938, 0.5255885009765625, 0.5259366455078125, 0.5256325073242187, 0.5258721313476562, 0.5255454711914063, 0.5259120483398437, 0.5255874633789063, 0.5258823852539063, 0.5260584716796874, 0.5260093383789063, 0.525638671875, 0.5257984008789063, 0.5254819946289062, 0.5257584838867188, 0.5255413818359375, 0.5256263427734374, 0.5254993896484375, 0.5260482788085937, 0.5255536499023438, 0.5256744995117187, 0.5257471923828125, 0.5260912475585937, 0.5257963256835938, 0.526045166015625, 0.5258997802734375, 0.5258854370117187, 0.5258956909179687, 0.526097412109375, 0.5258956909179687, 0.52596630859375, 0.525486083984375, 1.0877613525390626, 0.525769775390625, 0.5254829711914063, 0.5259612426757813, 0.525929443359375, 0.5262827758789063, 0.5257748413085938, 0.5258516235351562, 0.5254768676757813, 0.5256775512695312, 0.5254871215820313, 0.5258475341796875, 0.525770751953125, 0.5260123901367187, 0.5260103759765625, 0.52575537109375, 0.525591552734375, 0.5260431518554688, 0.5255874633789063, 0.5257769165039062, 0.52560693359375, 0.5258577880859375, 0.5256089477539062, 0.5257195434570312, 0.52571337890625, 0.5260534057617188, 0.5259540405273437, 0.526023681640625, 0.5256468505859375, 0.5257000732421875, 0.5256878051757813, 0.5258270874023437, 0.5255669555664062, 0.5259325561523438, 0.5256478881835938, 0.5259059448242187, 0.5256539916992188, 0.5259171752929688, 0.525533203125, 0.5258956909179687, 0.5256396484375, 0.5259151611328124, 0.5259089965820313, 0.5258045654296875, 0.525470703125, 0.526087158203125, 0.525570068359375, 0.526012451171875, 0.5258239135742188, 0.526266357421875, 0.5257000732421875, 0.5258916015625, 0.5262540893554688, 0.525970458984375, 0.5258168334960938, 0.5259356079101563, 0.5257216186523438, 0.5257953491210937, 0.5261250610351562, 0.5259458618164062, 0.5255300903320312, 0.5259765625, 0.5258004760742188, 1.0886134033203125, 0.52569189453125, 0.5255536499023438, 
0.5258383178710937, 0.5257799682617188, 0.5257584838867188, 0.5256837158203125, 0.5262305297851563, 0.5256099853515624, 0.525781005859375, 0.5258270874023437, 0.5262387084960938, 0.5258424072265625, 0.5259765625, 0.5260114135742188, 0.5261588745117187, 0.5259622192382812, 0.5260318603515625, 0.5257083129882812, 0.526166015625, 0.5256325073242187, 0.52587109375, 0.5258260498046875, 0.5261178588867188, 0.5256458129882813, 0.525897705078125, 0.5261752319335937, 0.5257431030273437, 0.5257799682617188, 0.526298095703125, 0.5260574951171875, 0.52619775390625, 0.52611279296875, 0.526271484375, 0.5262151489257813, 0.5269708862304687, 0.5263012084960937, 0.52657666015625, 0.5264937133789063, 0.5267752685546875, 0.5261527099609375, 0.5262315673828125, 0.5261690673828125, 0.5258997802734375, 0.5257728271484375, 0.5265807495117187, 0.525681640625, 0.5258721313476562, 0.5259049072265625, 0.525970458984375, 0.525633544921875, 0.5261414184570312, 0.5260697631835938, 0.5258782958984375, 0.5258792724609375, 0.52611376953125, 0.5257523193359375, 0.5261752319335937, 0.5259192504882813, 0.52607080078125, 0.5259898681640625, 0.5263278198242187, 0.5262356567382812, 1.089292236328125, 0.5257205810546876, 0.5256959838867188, 0.5258516235351562, 0.525675537109375, 0.525707275390625, 0.525644775390625, 0.5258741455078125, 0.5256427612304687, 0.52611376953125, 0.5256857299804687, 0.5256929321289062, 0.525865966796875, 0.5259274291992188, 0.5254942626953125, 0.52583935546875, 0.5259089965820313, 0.5260984497070312, 0.5255659790039062, 0.52596630859375, 0.5255341796875, 0.5257932739257812, 0.5256161499023437, 0.5257615356445312, 0.5255608520507813, 0.5256908569335937, 0.5256058959960938, 0.5258137817382813, 0.525822998046875, 0.5260390625, 0.5258168334960938, 0.526033935546875, 0.5262326049804688, 0.5259100341796875, 0.5258065795898438, 0.52625, 0.5258567504882813, 0.52642919921875, 0.5261015014648438, 0.525955078125, 0.5256345825195312, 0.5259990844726562, 0.5259017944335938, 0.525970458984375, 0.5258311767578125, 0.5259765625, 0.5258895263671876, 0.5263216552734375, 0.5260062866210937, 0.5263206176757812, 0.5261220092773438, 0.5258639526367187, 0.5263790283203125, 0.52634521484375, 0.52650390625, 0.5263104248046875, 0.5256294555664063, 0.5261782836914063, 0.5257164916992187, 0.526087158203125, 0.526256103515625, 0.52645068359375, 0.5261045532226563]",tokens/s,1.8728093232254581,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-34B,,cuda,0,42,,,,,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,main,False,False,MB,1743.122432,22129.672192,0.0,21483.225088,20799.036928,s,10,27.909502197265624,2.7909502197265623,0.0030225550178342967,2.7914050292968753,2.79439267578125,2.794928149414062,2.7953565283203123,"[2.794273681640625, 2.795463623046875, 2.7873525390625, 2.789448486328125, 2.787631103515625, 2.78628466796875, 2.790373291015625, 2.792436767578125, 2.79304931640625, 
2.793188720703125]",tokens/s,91.72503263962948,kWh,3.290233454770512e-05,1.803171549396211e-05,0.0001583880155991868,0.00020932206564085407,tokens/kWh,1222995.765956342,MB,1743.720448,22129.672192,0.0,21483.225088,20902.142976,s,10,1661.799328125,166.1799328125,0.024199823300055656,166.1772734375,166.2117125,166.21745,166.22204,"[166.17496875, 166.167359375, 166.14703125, 166.2104375, 166.2231875, 166.181296875, 166.179578125, 166.204078125, 166.159546875, 166.15184375]",tokens/s,0.37910714569299164,kWh,0.0019616428730719624,0.001075153456676562,0.009382404450362024,0.012419200780110549,tokens/kWh,5072.790199261051,,s,629,1684.2762666015624,2.677704716377683,0.33156351074934576,2.63752294921875,2.6395720703125,2.6405790039062498,5.428127246093751,"[2.63648046875, 2.637477783203125, 2.636610595703125, 2.63773388671875, 2.63674365234375, 2.6374482421875, 2.63684716796875, 2.637740966796875, 2.636241943359375, 2.6376162109375, 2.636966796875, 2.6365615234375, 2.639180908203125, 2.637003662109375, 2.637414306640625, 2.638074951171875, 2.638488525390625, 2.637603759765625, 2.6381865234375, 2.6367314453125, 2.63737646484375, 2.636360595703125, 2.63697314453125, 2.6366044921875, 2.6370693359375, 2.636568603515625, 2.636980224609375, 2.636353515625, 2.63706103515625, 2.636314697265625, 2.637646728515625, 2.63864208984375, 2.637822998046875, 2.63769091796875, 2.636580810546875, 2.636631103515625, 2.6366474609375, 2.636380126953125, 2.635778076171875, 2.639795166015625, 2.637285400390625, 2.636735595703125, 2.63680419921875, 2.636505126953125, 2.636515380859375, 2.637959228515625, 2.6366884765625, 2.638927001953125, 2.6396015625, 2.6393056640625, 2.640069580078125, 2.638898193359375, 2.6396181640625, 2.639129638671875, 2.6404248046875, 2.63950439453125, 2.6398291015625, 2.639203369140625, 2.639447021484375, 2.638824462890625, 2.638972900390625, 2.637701171875, 5.43399951171875, 2.63682861328125, 2.637526123046875, 2.637077392578125, 2.636991455078125, 2.636851318359375, 2.63690966796875, 2.636982177734375, 2.638088134765625, 2.63716455078125, 2.63853662109375, 2.638309326171875, 2.63889111328125, 2.64060107421875, 2.637055908203125, 2.63606884765625, 2.63802880859375, 2.637928466796875, 2.6390927734375, 2.6384384765625, 2.637506591796875, 2.63756298828125, 2.63665869140625, 2.636583984375, 2.63687060546875, 2.6370908203125, 2.637446044921875, 2.63802685546875, 2.6375556640625, 2.637516845703125, 2.63621728515625, 2.637552734375, 2.637285400390625, 2.637663330078125, 2.6371767578125, 2.637763671875, 2.63699560546875, 2.63773583984375, 2.637076416015625, 2.637444091796875, 2.636642333984375, 2.636317626953125, 2.63705078125, 2.636695556640625, 2.637413330078125, 2.63615478515625, 2.6434365234375, 2.637322265625, 2.638057373046875, 2.63786181640625, 2.638676025390625, 2.637177734375, 2.63716455078125, 2.637470703125, 2.63781689453125, 2.636496826171875, 2.63722802734375, 2.637602783203125, 2.638507080078125, 2.637663330078125, 2.636621826171875, 2.63727001953125, 2.6375107421875, 5.42925830078125, 2.638592041015625, 2.6379560546875, 2.637390869140625, 2.636768310546875, 2.63889501953125, 2.638235595703125, 2.638288818359375, 2.637874267578125, 2.638148681640625, 2.637728759765625, 2.637327392578125, 2.63690869140625, 2.637347900390625, 2.6369453125, 2.637433837890625, 2.63707958984375, 2.636822509765625, 2.637656982421875, 2.63617529296875, 2.636454833984375, 2.6358466796875, 2.636423095703125, 2.63856640625, 2.63604833984375, 2.63657470703125, 2.637000732421875, 2.637095947265625, 2.636664794921875, 
2.6381435546875, 2.640372802734375, 2.638454833984375, 2.636282958984375, 2.636675048828125, 2.635629638671875, 2.636507080078125, 2.63680615234375, 2.63701611328125, 2.636884033203125, 2.63752294921875, 2.6366669921875, 2.63699462890625, 2.63780859375, 2.6374072265625, 2.636739501953125, 2.6374296875, 2.637625244140625, 2.639678466796875, 2.63699658203125, 2.63729052734375, 2.637656982421875, 2.636547119140625, 2.6364794921875, 2.636135498046875, 2.636613525390625, 2.636599365234375, 2.63747265625, 2.6357861328125, 2.63600439453125, 2.6359716796875, 2.636907470703125, 2.636198974609375, 2.639195068359375, 5.42974560546875, 2.637918212890625, 2.638065673828125, 2.6382080078125, 2.639459228515625, 2.638834716796875, 2.640337890625, 2.639097900390625, 2.638284912109375, 2.639035400390625, 2.636672119140625, 2.63703955078125, 2.63772265625, 2.639283203125, 2.6380830078125, 2.639520751953125, 2.63948291015625, 2.637526123046875, 2.638147705078125, 2.637602783203125, 2.63681640625, 2.63762841796875, 2.638529541015625, 2.63696484375, 2.63714599609375, 2.637178955078125, 2.6376796875, 2.63855810546875, 2.641314697265625, 2.637876220703125, 2.63853369140625, 2.63912548828125, 2.637345703125, 2.636560302734375, 2.636971923828125, 2.636669921875, 2.636619873046875, 2.6392392578125, 2.63902099609375, 2.638465087890625, 2.63775341796875, 2.638636962890625, 2.637673583984375, 2.6372373046875, 2.64167431640625, 2.6387353515625, 2.637962158203125, 2.639416259765625, 2.637681640625, 2.639564697265625, 2.6377236328125, 2.638668701171875, 2.638095458984375, 2.63796728515625, 2.637655029296875, 2.637210693359375, 2.63780859375, 2.638158935546875, 2.637846435546875, 2.63988232421875, 2.641334228515625, 2.636745849609375, 2.63619482421875, 5.42609521484375, 2.63847412109375, 2.64098095703125, 2.6405458984375, 2.64087451171875, 2.64148388671875, 2.64074755859375, 2.64026318359375, 2.640604248046875, 2.640962646484375, 2.638622802734375, 2.63895458984375, 2.636958740234375, 2.636030029296875, 2.636072021484375, 2.637117431640625, 2.636577880859375, 2.642241455078125, 2.64078857421875, 2.641033203125, 2.641383544921875, 2.64137841796875, 2.639701904296875, 2.640848876953125, 2.640034912109375, 2.6397451171875, 2.640384033203125, 2.637106201171875, 2.636432373046875, 2.636267578125, 2.637424560546875, 2.636958740234375, 2.63680712890625, 2.637274169921875, 2.638011474609375, 2.63804931640625, 2.63707861328125, 2.636590087890625, 2.63739794921875, 2.63689013671875, 2.63980029296875, 2.639002685546875, 2.638836669921875, 2.63720849609375, 2.637477783203125, 2.637814697265625, 2.637075439453125, 2.637104248046875, 2.638521240234375, 2.63874755859375, 2.63686962890625, 2.63682763671875, 2.637802490234375, 2.63747998046875, 2.6362255859375, 2.63657568359375, 2.636940185546875, 2.642130859375, 2.6372998046875, 2.63684912109375, 2.63803271484375, 2.63796533203125, 2.637073486328125, 5.4244033203125, 2.636745849609375, 2.6364814453125, 2.6371123046875, 2.637408203125, 2.636239990234375, 2.63697607421875, 2.638215087890625, 2.639066162109375, 2.63918994140625, 2.639048583984375, 2.63975634765625, 2.639510498046875, 2.63817626953125, 2.637822021484375, 2.6385029296875, 2.637655029296875, 2.637551513671875, 2.636876708984375, 2.63750048828125, 2.63714599609375, 2.63798779296875, 2.63727099609375, 2.637442138671875, 2.636712890625, 2.637854736328125, 2.638200927734375, 2.6394736328125, 2.63754541015625, 2.636971923828125, 2.637844482421875, 2.63727197265625, 2.6361865234375, 2.636801025390625, 2.637619140625, 
2.637947998046875, 2.63809228515625, 2.637560791015625, 2.6417490234375, 2.63729052734375, 2.63712158203125, 2.63689111328125, 2.63716259765625, 2.635864990234375, 2.6373251953125, 2.63663623046875, 2.63742578125, 2.637765625, 2.63775341796875, 2.638487548828125, 2.638257080078125, 2.63924951171875, 2.639295654296875, 2.639500244140625, 2.63644970703125, 2.637189208984375, 2.637624267578125, 2.639413330078125, 2.639233154296875, 2.63777587890625, 2.637401123046875, 2.637765625, 2.63680712890625, 5.42891748046875, 2.637751220703125, 2.6369423828125, 2.637287353515625, 2.6384208984375, 2.6376162109375, 2.637705078125, 2.6376396484375, 2.636984375, 2.636735595703125, 2.637454345703125, 2.63906005859375, 2.641005615234375, 2.639869873046875, 2.638488525390625, 2.638159912109375, 2.638043212890625, 2.6367958984375, 2.6363330078125, 2.637013916015625, 2.63684716796875, 2.63693115234375, 2.636872802734375, 2.64043212890625, 2.636421142578125, 2.63861962890625, 2.63849169921875, 2.638710693359375, 2.637824951171875, 2.6379111328125, 2.636610595703125, 2.6375966796875, 2.637263916015625, 2.63708984375, 2.637641845703125, 2.637759521484375, 2.6372197265625, 2.63781591796875, 2.63773583984375, 2.638950439453125, 2.637347900390625, 2.63666796875, 2.638234619140625, 2.638180419921875, 2.637719482421875, 2.63872216796875, 2.639690673828125, 2.63703662109375, 2.636780517578125, 2.636748779296875, 2.637224853515625, 2.636233642578125, 2.636378173828125, 2.636863525390625, 2.63923193359375, 2.638593994140625, 2.638180419921875, 2.63895556640625, 2.638899169921875, 2.637263916015625, 2.63600537109375, 2.637727783203125, 2.63825, 5.43155078125, 2.637918212890625, 2.63883056640625, 2.63889208984375, 2.63790185546875, 2.637695068359375, 2.636559326171875, 2.636966796875, 2.63714208984375, 2.639033447265625, 2.6404404296875, 2.640530517578125, 2.64198046875, 2.640649169921875, 2.637720458984375, 2.637758544921875, 2.639655029296875, 2.63736328125, 2.63708251953125, 2.636771240234375, 2.636508056640625, 2.63598583984375, 2.6370068359375, 2.636777587890625, 2.636590087890625, 2.63606884765625, 2.63714111328125, 2.63913671875, 2.637537353515625, 2.63705712890625, 2.636992431640625, 2.638615478515625, 2.636080078125, 2.637097900390625, 2.636854248046875, 2.640509033203125, 2.63925146484375, 2.6389052734375, 2.6383544921875, 2.63872705078125, 2.638085205078125, 2.6382275390625, 2.637551513671875, 2.637571044921875, 2.637200439453125, 2.63806982421875, 2.637642822265625, 2.637718505859375, 2.6373642578125, 2.6366044921875, 2.638297119140625, 2.63918798828125, 2.63943896484375, 2.6392646484375, 2.639478759765625, 2.637856689453125, 2.638085205078125, 2.638904296875, 2.6383564453125, 2.639803466796875, 2.640150634765625, 2.637856689453125, 2.640256103515625, 5.4306796875, 2.637127685546875, 2.6382705078125, 2.640280517578125, 2.639151123046875, 2.639075439453125, 2.63817529296875, 2.638035888671875, 2.638784423828125, 2.639287353515625, 2.637106201171875, 2.637173828125, 2.6368388671875, 2.6376591796875, 2.635720703125, 2.63617333984375, 2.636442626953125, 2.63904150390625, 2.6368818359375, 2.635509765625, 2.636674072265625, 2.6366484375, 2.637701171875, 2.638306396484375, 2.64076806640625, 2.63686865234375, 2.6378896484375, 2.639107177734375, 2.6401259765625, 2.6394111328125, 2.63695166015625, 2.63650927734375, 2.636439453125, 2.63674267578125, 2.637043701171875, 2.637275146484375, 2.637382568359375, 2.63693115234375, 2.636669921875, 2.637486083984375, 2.636729248046875, 2.636916748046875, 2.6372802734375, 
2.638066650390625, 2.637075439453125, 2.637048828125, 2.636240966796875, 2.637787109375, 2.636506103515625, 2.636660888671875, 2.63712548828125, 2.637421630859375, 2.637157470703125, 2.636517333984375, 2.637552734375, 2.636777587890625, 2.63666064453125, 2.6360966796875, 2.637487060546875, 2.636655517578125, 2.6372158203125, 2.637021240234375, 2.63826025390625, 5.43210595703125, 2.637992919921875, 2.6383369140625, 2.636921875, 2.637557861328125, 2.637305908203125, 2.63912646484375, 2.63752099609375, 2.6370693359375, 2.638551025390625, 2.6377646484375, 2.638067626953125, 2.637068359375, 2.63798876953125, 2.6374892578125, 2.63644775390625, 2.636535888671875, 2.637727783203125, 2.63798876953125, 2.637382568359375, 2.637177734375, 2.637806640625, 2.63628173828125, 2.6376171875, 2.63735302734375, 2.63743896484375, 2.6397236328125, 2.637740966796875, 2.637970458984375, 2.6363720703125, 2.636199951171875, 2.6363935546875, 2.638331787109375, 2.636755859375, 2.636908447265625, 2.636675048828125, 2.63809033203125, 2.636525634765625, 2.63657373046875, 2.63596435546875, 2.63809130859375, 2.6373671875, 2.636396484375, 2.637010009765625, 2.637177734375, 2.636538818359375, 2.637276123046875, 2.638035888671875, 2.636796875, 2.636739501953125, 2.636539794921875, 2.63828076171875, 2.63714404296875, 2.637462646484375, 2.637360107421875, 2.638277587890625, 2.636430419921875, 2.6370283203125, 2.6365234375, 2.638316650390625, 2.636391357421875, 2.636579833984375, 2.636466064453125]",tokens/s,0.3734541728532224,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-160m,EleutherAI/pythia-160m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1312.858112,1030.22592,0.0,383.778816,312.280064,s,10,0.275944766998291,0.0275944766998291,0.0013350624992567249,0.0271289119720459,0.027794796180725095,0.02969363794326782,0.031212711353302003,"[0.03159247970581055, 0.027166879653930665, 0.027137279510498047, 0.02737283134460449, 0.02710416030883789, 0.02712054443359375, 0.027108831405639647, 0.027081024169921874, 0.027172767639160156, 0.027087968826293947]",tokens/s,9277.218871905097,kWh,3.2455721808658853e-07,1.778424162839056e-07,7.707522954780324e-07,1.2731519298485266e-06,tokens/kWh,201075766.3702066,MB,1312.858112,1030.22592,0.0,383.778816,347.089408,s,10,16.864729003906252,1.6864729003906251,0.020323100201592524,1.6794135742187501,1.6910009521484375,1.7188808471679686,1.7411847631835937,"[1.7467607421875, 1.67984912109375, 1.6774495849609374, 1.678377685546875, 1.67897802734375, 1.6768824462890626, 1.6750291748046875, 1.684479248046875, 1.684805419921875, 1.6821175537109374]",tokens/s,37.356070165970515,kWh,1.9882176955741114e-05,1.0895638286132514e-05,4.512764553432279e-05,7.590546077619641e-05,tokens/kWh,829979.8111462949,,s,629,17.07843275070189,0.027151721384263763,0.003315017386101685,0.026595327377319337,0.0273623046875,0.02783129653930664,0.05417021270751953,"[0.029247488021850586, 0.027447296142578126, 0.028238847732543947, 0.029016063690185546, 
0.02889727973937988, 0.028690431594848635, 0.028497919082641602, 0.029652992248535157, 0.028964864730834962, 0.02876518440246582, 0.029016063690185546, 0.02860851287841797, 0.02900480079650879, 0.029263872146606446, 0.02835353660583496, 0.028241920471191406, 0.02977484893798828, 0.031457279205322264, 0.028201984405517577, 0.02774220848083496, 0.027778047561645508, 0.02750464057922363, 0.027625471115112304, 0.027877376556396483, 0.027576320648193358, 0.027623424530029295, 0.027853824615478515, 0.027623424530029295, 0.02755583953857422, 0.027467775344848632, 0.027467775344848632, 0.027464704513549806, 0.02751692771911621, 0.027671552658081053, 0.028444671630859376, 0.02751081657409668, 0.027527168273925783, 0.02693833541870117, 0.02656768035888672, 0.02656051254272461, 0.026586111068725587, 0.026575904846191406, 0.02656355285644531, 0.026597375869750976, 0.02668339157104492, 0.026562559127807618, 0.0265799674987793, 0.02705510330200195, 0.02689945602416992, 0.02673766326904297, 0.026646528244018555, 0.026635263442993166, 0.02674892807006836, 0.026631168365478516, 0.026613759994506835, 0.026697727203369142, 0.026650623321533205, 0.02710323143005371, 0.026925119400024414, 0.02659014320373535, 0.027423744201660157, 0.027077632904052733, 0.05460070419311523, 0.026619903564453123, 0.026565631866455077, 0.026629119873046874, 0.026554399490356446, 0.02660246467590332, 0.026628095626831053, 0.02656768035888672, 0.026635263442993166, 0.0266527042388916, 0.026569696426391603, 0.026598463058471678, 0.026578880310058593, 0.0265799674987793, 0.02658406448364258, 0.026687488555908204, 0.026688512802124024, 0.02656358337402344, 0.02654003143310547, 0.026614784240722656, 0.026625024795532228, 0.02652672004699707, 0.026619903564453123, 0.028004352569580077, 0.02753945541381836, 0.02736332893371582, 0.027281408309936524, 0.02712678337097168, 0.026574848175048828, 0.026603519439697267, 0.02654310417175293, 0.026565631866455077, 0.0265031681060791, 0.026557439804077147, 0.02653388786315918, 0.026556415557861326, 0.02654412841796875, 0.02655948829650879, 0.02668441581726074, 0.026625024795532228, 0.02655948829650879, 0.026588159561157225, 0.02654310417175293, 0.026572799682617186, 0.02655129623413086, 0.02656768035888672, 0.026604543685913085, 0.02655955123901367, 0.026596288681030273, 0.026562559127807618, 0.026596351623535155, 0.026594303131103517, 0.026558464050292968, 0.02655232048034668, 0.02659328079223633, 0.02651238441467285, 0.026549247741699217, 0.026588159561157225, 0.02657587242126465, 0.02659328079223633, 0.026556415557861326, 0.02650726318359375, 0.02656870460510254, 0.0541736946105957, 0.026597375869750976, 0.026587135314941408, 0.02655232048034668, 0.026550271987915038, 0.026600448608398438, 0.026652671813964843, 0.026689535140991212, 0.02655129623413086, 0.02653081512451172, 0.02658406448364258, 0.026677248001098632, 0.02651548767089844, 0.02671510314941406, 0.027616256713867186, 0.027520000457763674, 0.026843135833740234, 0.026626047134399415, 0.026536991119384765, 0.02662294387817383, 0.026621952056884765, 0.02655129623413086, 0.026534912109375, 0.026634239196777345, 0.026594303131103517, 0.02670182418823242, 0.0265799674987793, 0.02651136016845703, 0.02654003143310547, 0.02653388786315918, 0.02654412841796875, 0.026617855072021485, 0.026968063354492186, 0.02676736068725586, 0.026615808486938477, 0.02655539131164551, 0.02651545524597168, 0.026661888122558593, 0.026573823928833007, 0.026454015731811522, 0.02652470397949219, 0.026513376235961915, 0.0265031681060791, 0.02651852798461914, 
0.026694656372070313, 0.026492927551269533, 0.026570751190185548, 0.026505216598510743, 0.026565631866455077, 0.026583040237426758, 0.026574848175048828, 0.02650931167602539, 0.02651033592224121, 0.026664960861206056, 0.026570751190185548, 0.02651238441467285, 0.026582015991210937, 0.02654515266418457, 0.02649087905883789, 0.02654719924926758, 0.026638336181640625, 0.02657689666748047, 0.026598400115966796, 0.05530112075805664, 0.027283456802368163, 0.027225088119506836, 0.02709503936767578, 0.026944511413574217, 0.02655436706542969, 0.02654310417175293, 0.026695680618286134, 0.026656768798828126, 0.0265850887298584, 0.026588191986083986, 0.02655023956298828, 0.026674175262451173, 0.026564607620239256, 0.026580991744995116, 0.026611711502075194, 0.026570751190185548, 0.0265533447265625, 0.026611711502075194, 0.026570751190185548, 0.026529792785644532, 0.02669977569580078, 0.026549247741699217, 0.02660767936706543, 0.02659424018859863, 0.02671001625061035, 0.02650111961364746, 0.02655539131164551, 0.026587135314941408, 0.02655232048034668, 0.026657791137695314, 0.0265533447265625, 0.026541055679321288, 0.026558464050292968, 0.02651238441467285, 0.026514432907104493, 0.026467327117919923, 0.026565631866455077, 0.02692403221130371, 0.026690559387207033, 0.02655436706542969, 0.026600448608398438, 0.026719232559204102, 0.02657689666748047, 0.026662912368774414, 0.02651136016845703, 0.026564607620239256, 0.026537984848022462, 0.026505216598510743, 0.02651238441467285, 0.02656358337402344, 0.02652262306213379, 0.026663936614990235, 0.026497024536132813, 0.026646528244018555, 0.02652876853942871, 0.026637311935424804, 0.02657587242126465, 0.02652672004699707, 0.02657177543640137, 0.026604543685913085, 0.026583040237426758, 0.026484735488891603, 0.054171646118164066, 0.02660147285461426, 0.026550271987915038, 0.02656972885131836, 0.02654515266418457, 0.02658406448364258, 0.026532863616943358, 0.02650726318359375, 0.02657792091369629, 0.02667622375488281, 0.0265799674987793, 0.02656153678894043, 0.026580991744995116, 0.026556480407714845, 0.02659833526611328, 0.026598400115966796, 0.026611711502075194, 0.026615808486938477, 0.026514432907104493, 0.02654207992553711, 0.02656768035888672, 0.02656870460510254, 0.026633216857910157, 0.026589183807373046, 0.02657177543640137, 0.02655436706542969, 0.026660863876342773, 0.026614784240722656, 0.026558464050292968, 0.02655436706542969, 0.026617855072021485, 0.026604543685913085, 0.026554399490356446, 0.026611679077148436, 0.026650623321533205, 0.026488832473754883, 0.02629631996154785, 0.02631372833251953, 0.0275230712890625, 0.027797504425048827, 0.02753023910522461, 0.02730803108215332, 0.02698854446411133, 0.026604543685913085, 0.02653081512451172, 0.026624000549316407, 0.02660147285461426, 0.026764287948608398, 0.026658815383911134, 0.02656870460510254, 0.026557439804077147, 0.02654617691040039, 0.026639360427856446, 0.026580991744995116, 0.026534912109375, 0.02653593635559082, 0.026594303131103517, 0.026620927810668944, 0.026596351623535155, 0.02657587242126465, 0.026604543685913085, 0.026604543685913085, 0.02653388786315918, 0.05420851135253906, 0.0265677433013916, 0.02660960006713867, 0.02667519950866699, 0.026580991744995116, 0.026594303131103517, 0.026562559127807618, 0.026665983200073243, 0.026590208053588867, 0.02653900718688965, 0.026637311935424804, 0.026638336181640625, 0.02654617691040039, 0.026590208053588867, 0.026562559127807618, 0.026696704864501954, 0.026629119873046874, 0.02652774429321289, 0.02652057647705078, 0.02653081512451172, 
0.0265533447265625, 0.02654207992553711, 0.026694656372070313, 0.02658406448364258, 0.026640384674072266, 0.02657792091369629, 0.026534912109375, 0.02651545524597168, 0.026521600723266602, 0.02656153678894043, 0.02651852798461914, 0.026580991744995116, 0.02653388786315918, 0.026529792785644532, 0.0265799674987793, 0.026592256546020508, 0.026693632125854492, 0.02651545524597168, 0.026639360427856446, 0.027520000457763674, 0.02674380874633789, 0.026613759994506835, 0.026694656372070313, 0.026521600723266602, 0.026641408920288087, 0.026607616424560547, 0.026558464050292968, 0.026610687255859376, 0.026630144119262695, 0.02669260787963867, 0.026634271621704102, 0.02657072067260742, 0.02654003143310547, 0.02657587242126465, 0.026605567932128905, 0.026534912109375, 0.026697727203369142, 0.026587135314941408, 0.02652060890197754, 0.026654687881469727, 0.026598400115966796, 0.026714111328125, 0.02658406448364258, 0.054166526794433595, 0.02671615982055664, 0.026641408920288087, 0.026625024795532228, 0.0265994873046875, 0.026596288681030273, 0.02654719924926758, 0.026572799682617186, 0.026620927810668944, 0.026605567932128905, 0.02669977569580078, 0.02654316711425781, 0.026589120864868164, 0.026582015991210937, 0.026604543685913085, 0.026550271987915038, 0.026619903564453123, 0.02656051254272461, 0.026521600723266602, 0.026631168365478516, 0.02628505516052246, 0.02677350425720215, 0.026702848434448243, 0.02657177543640137, 0.02660966491699219, 0.02656358337402344, 0.02651545524597168, 0.0265533447265625, 0.026504192352294922, 0.02660147285461426, 0.02653900718688965, 0.026639360427856446, 0.0265850887298584, 0.026626047134399415, 0.02654003143310547, 0.026556447982788087, 0.026569696426391603, 0.026558464050292968, 0.02659328079223633, 0.026556415557861326, 0.02654617691040039, 0.02668339157104492, 0.026589183807373046, 0.026498048782348634, 0.026669055938720702, 0.026529792785644532, 0.026572799682617186, 0.02655129623413086, 0.0265164794921875, 0.026590208053588867, 0.02657792091369629, 0.026616832733154298, 0.026598400115966796, 0.026660863876342773, 0.026573823928833007, 0.026595327377319337, 0.026570751190185548, 0.0265164794921875, 0.02639366340637207, 0.026553279876708986, 0.02658406448364258, 0.026657791137695314, 0.02649497604370117, 0.05412044906616211, 0.026598400115966796, 0.026583040237426758, 0.026651647567749022, 0.02652262306213379, 0.026586111068725587, 0.02655129623413086, 0.026594303131103517, 0.02712678337097168, 0.02734489631652832, 0.026644479751586913, 0.026637311935424804, 0.026594303131103517, 0.026646528244018555, 0.026602495193481446, 0.027348991394042968, 0.027696128845214843, 0.027305984497070314, 0.027241472244262696, 0.02698854446411133, 0.026565631866455077, 0.026608640670776368, 0.027296768188476563, 0.027320320129394532, 0.027320320129394532, 0.027268096923828124, 0.02708684730529785, 0.026829824447631836, 0.026608640670776368, 0.02674073600769043, 0.02660147285461426, 0.026590208053588867, 0.02655129623413086, 0.02654207992553711, 0.02661075210571289, 0.0265860481262207, 0.02667622375488281, 0.026572799682617186, 0.026582015991210937, 0.026586111068725587, 0.02653081512451172, 0.026566656112670898, 0.02655232048034668, 0.026702848434448243, 0.026602495193481446, 0.02654617691040039, 0.026550271987915038, 0.026645503997802734, 0.026550271987915038, 0.026616832733154298, 0.02660966491699219, 0.026596351623535155, 0.026595327377319337, 0.026586111068725587, 0.02654617691040039, 0.02654617691040039, 0.026786815643310546, 0.02656153678894043, 0.026608640670776368, 
0.026602495193481446, 0.026529792785644532, 0.026586111068725587, 0.026638336181640625, 0.05429862213134766, 0.026634239196777345, 0.026537984848022462, 0.026639360427856446, 0.02672127914428711, 0.02692915153503418, 0.027014144897460936, 0.026755071640014647, 0.026573823928833007, 0.026562559127807618, 0.026572799682617186, 0.026690559387207033, 0.02657177543640137, 0.026617855072021485, 0.026610687255859376, 0.026606592178344726, 0.026598400115966796, 0.026573823928833007, 0.026597375869750976, 0.026514432907104493, 0.026582015991210937, 0.02659328079223633, 0.026673152923583986, 0.027404319763183593, 0.026896352767944335, 0.026578943252563478, 0.026537984848022462, 0.026552352905273437, 0.02658710479736328, 0.026514432907104493, 0.02651238441467285, 0.026514432907104493, 0.02652262306213379, 0.02651238441467285, 0.02653081512451172, 0.026626047134399415, 0.026565631866455077, 0.026716224670410155, 0.026623935699462892, 0.026580991744995116, 0.02654617691040039, 0.026556415557861326, 0.026479616165161132, 0.02652876853942871, 0.026670080184936523, 0.026643455505371092, 0.026827775955200195, 0.026611711502075194, 0.027028480529785157, 0.027226112365722657, 0.027289600372314454, 0.02755583953857422, 0.0273623046875, 0.0273623046875, 0.02728550338745117, 0.02791116714477539, 0.027173887252807616, 0.0267775993347168, 0.026597375869750976, 0.02654515266418457, 0.02656153678894043, 0.026599424362182617, 0.026617855072021485, 0.05418803024291992, 0.026620927810668944, 0.026647552490234375, 0.02657689666748047, 0.02651238441467285, 0.026595327377319337, 0.026607616424560547, 0.026603519439697267, 0.026594303131103517, 0.026619903564453123, 0.02672230339050293, 0.026630144119262695, 0.026639360427856446, 0.02667622375488281, 0.026677248001098632, 0.026631168365478516, 0.026617855072021485, 0.026565631866455077, 0.026587135314941408, 0.0265164794921875, 0.026616832733154298, 0.026588159561157225, 0.026612735748291014, 0.026612735748291014, 0.02656768035888672, 0.026558464050292968, 0.026629119873046874, 0.026508287429809572, 0.026702848434448243, 0.02671615982055664, 0.0265533447265625, 0.026586111068725587, 0.026626047134399415, 0.02655232048034668, 0.02658406448364258, 0.02654412841796875, 0.02653081512451172, 0.0265799674987793, 0.026612735748291014, 0.026603519439697267, 0.02635366439819336, 0.026727424621582032, 0.02656153678894043, 0.026595327377319337, 0.026583040237426758, 0.026492927551269533, 0.026572799682617186, 0.02653593635559082, 0.026678272247314453, 0.02653900718688965, 0.02690559959411621, 0.02734284782409668, 0.02688102340698242, 0.026624000549316407, 0.02656358337402344, 0.026578943252563478, 0.027030527114868166, 0.027415552139282227, 0.02731724739074707, 0.027236352920532225, 0.027314176559448244, 0.027259904861450194, 0.027412479400634765]",tokens/s,36.83007739537158,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,M,M,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File 
""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/M/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2ef4-53db421d3fb01c4003b8773f;9700ecb4-6210-4f6a-996c-5c49dffdda43) Repository Not Found for url: https://huggingface.co/M/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: M is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,openai-community/gpt2-large,openai-community/gpt2-large,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.1,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1255.170048,2645.03296,0.0,1998.585856,1692.285952,s,10,0.19596438217163084,0.019596438217163083,0.0005758753327798738,0.019594847679138182,0.020116428565979005,0.020397430515289305,0.020622232074737547,"[0.02067843246459961, 0.01995683288574219, 0.018657087326049804, 0.019397823333740235, 0.01875289535522461, 0.019354879379272463, 0.019658687591552735, 0.019531007766723632, 0.020053983688354492, 0.019922752380371094]",tokens/s,13063.598454120523,kWh,2.196003245524462e-07,1.203308225381941e-07,6.683390311598506e-07,1.008270178250491e-06,tokens/kWh,253900200.08744156,MB,1255.46496,2645.03296,0.0,1998.585856,1740.085248,s,10,11.909100097656252,1.1909100097656249,0.015963798664011486,1.1926223754882814,1.1998400390625,1.2087030395507812,1.215793439941406,"[1.2175660400390624, 1.1913038330078125, 1.18787646484375, 1.1978704833984375, 1.1491290283203126, 1.195384033203125, 1.1938314208984375, 1.19254345703125, 1.19089404296875, 
1.1927012939453125]",tokens/s,52.90072254275419,kWh,1.3774184907753079e-05,7.5477682720333215e-06,2.8730712265842306e-05,5.005266544562869e-05,tokens/kWh,1258674.227218444,,s,629,12.064169954299933,0.019179920436088915,0.0023597437619934914,0.018893823623657227,0.019228466796875003,0.019791871643066405,0.038067117614746133,"[0.02008166313171387, 0.01979801559448242, 0.0196177921295166, 0.01978265571594238, 0.019717119216918946, 0.019862527847290038, 0.019771392822265626, 0.01966592025756836, 0.01981439971923828, 0.019919872283935547, 0.019991552352905274, 0.0198154239654541, 0.01984102439880371, 0.019713024139404296, 0.019945472717285157, 0.019693599700927735, 0.019734495162963866, 0.019999744415283204, 0.019870719909667968, 0.019916799545288084, 0.019698688507080078, 0.01944063949584961, 0.019574783325195313, 0.019809280395507813, 0.019941375732421874, 0.019547136306762695, 0.019590143203735352, 0.019150848388671874, 0.018918399810791017, 0.019148799896240236, 0.01879756736755371, 0.018762752532958983, 0.01876479911804199, 0.018760704040527345, 0.018949119567871094, 0.01901158332824707, 0.018997247695922852, 0.018856960296630858, 0.018832384109497072, 0.018873344421386717, 0.018898944854736328, 0.018821151733398437, 0.01887945556640625, 0.018886655807495118, 0.018907136917114258, 0.01886412811279297, 0.01882521629333496, 0.01883033561706543, 0.01877299118041992, 0.01884569549560547, 0.01886310386657715, 0.018886655807495118, 0.01883340835571289, 0.01906790351867676, 0.01885081672668457, 0.018737152099609376, 0.018815999984741212, 0.01884774398803711, 0.019143680572509765, 0.018949119567871094, 0.020289535522460937, 0.01920512008666992, 0.03925708770751953, 0.018815999984741212, 0.01889587211608887, 0.018832384109497072, 0.018815999984741212, 0.018876415252685547, 0.01883647918701172, 0.018874368667602538, 0.018893823623657227, 0.018908159255981445, 0.018892799377441406, 0.01882624053955078, 0.018942975997924806, 0.018923519134521484, 0.018984960556030273, 0.01907711982727051, 0.018955263137817382, 0.018844671249389648, 0.018907136917114258, 0.018997247695922852, 0.0189040641784668, 0.018984960556030273, 0.01885081672668457, 0.01881705665588379, 0.018884576797485352, 0.01886720085144043, 0.018889728546142577, 0.01884774398803711, 0.018916351318359375, 0.018886655807495118, 0.018868223190307617, 0.018915327072143554, 0.01883852767944336, 0.018860031127929687, 0.018909183502197266, 0.018886655807495118, 0.018922496795654296, 0.018905088424682616, 0.018777088165283205, 0.01884364891052246, 0.018903039932250978, 0.018914304733276367, 0.018931711196899414, 0.018917375564575196, 0.018893823623657227, 0.018744319915771485, 0.019489791870117186, 0.01881497573852539, 0.018911231994628908, 0.018894847869873048, 0.018968576431274413, 0.01905971145629883, 0.018966527938842775, 0.01887027168273926, 0.018861055374145508, 0.018988031387329102, 0.018940959930419922, 0.01884156799316406, 0.018929664611816405, 0.018964479446411133, 0.018922496795654296, 0.01888467216491699, 0.018768831253051756, 0.03698995208740234, 0.018000896453857423, 0.01803878402709961, 0.01861529541015625, 0.01846886444091797, 0.018893823623657227, 0.01886207962036133, 0.018841632843017576, 0.018986976623535157, 0.01903001594543457, 0.018913280487060546, 0.018872352600097658, 0.018723808288574218, 0.01885593605041504, 0.018965503692626954, 0.018915327072143554, 0.018923519134521484, 0.01879347229003906, 0.018799615859985352, 0.01884876823425293, 0.01886310386657715, 0.018957311630249024, 0.0188723201751709, 0.01919385528564453, 
0.018920480728149416, 0.018839519500732423, 0.018932735443115235, 0.01881395149230957, 0.018926591873168946, 0.019110912322998046, 0.01883647918701172, 0.018770944595336913, 0.018880512237548826, 0.01885081672668457, 0.018919424057006837, 0.018876415252685547, 0.018939903259277344, 0.01885081672668457, 0.018897920608520507, 0.018840576171875, 0.018856960296630858, 0.01884876823425293, 0.01887539291381836, 0.01882009506225586, 0.018813983917236328, 0.018836448669433594, 0.018860031127929687, 0.019740671157836915, 0.019281919479370118, 0.018899967193603515, 0.018890783309936522, 0.018863071441650392, 0.01886412811279297, 0.01881804847717285, 0.018817024230957033, 0.018861055374145508, 0.01885081672668457, 0.018868223190307617, 0.018972671508789063, 0.018760704040527345, 0.018917375564575196, 0.018985984802246093, 0.01883647918701172, 0.03859558486938477, 0.018811904907226562, 0.019163135528564454, 0.02009702491760254, 0.019518463134765626, 0.018994176864624023, 0.01887948799133301, 0.019116031646728517, 0.01887539291381836, 0.018940927505493164, 0.01884364891052246, 0.01884364891052246, 0.01904435157775879, 0.018973695755004884, 0.018964479446411133, 0.019170303344726563, 0.01942630386352539, 0.018782207489013672, 0.018939903259277344, 0.018861055374145508, 0.018964479446411133, 0.018869247436523438, 0.018767871856689454, 0.01895427131652832, 0.018802656173706054, 0.018971647262573242, 0.018899967193603515, 0.01889587211608887, 0.01884876823425293, 0.019386367797851564, 0.018991104125976564, 0.019079168319702147, 0.018916351318359375, 0.018882560729980468, 0.01925632095336914, 0.018929664611816405, 0.018899967193603515, 0.018811935424804686, 0.01888355255126953, 0.018973695755004884, 0.018752511978149415, 0.0189040641784668, 0.019373056411743163, 0.02027008056640625, 0.020695039749145508, 0.01957683181762695, 0.019105791091918945, 0.01883852767944336, 0.018929664611816405, 0.018824192047119142, 0.01904025650024414, 0.01886412811279297, 0.01887539291381836, 0.01883852767944336, 0.01881292724609375, 0.018732032775878905, 0.018886655807495118, 0.018578432083129884, 0.01839926338195801, 0.018844640731811524, 0.018790399551391602, 0.01878937530517578, 0.018752511978149415, 0.03698483276367188, 0.017928192138671875, 0.01796505546569824, 0.01789952087402344, 0.0178657283782959, 0.017949695587158202, 0.017992704391479493, 0.017977344512939454, 0.017977344512939454, 0.018036735534667968, 0.017976320266723633, 0.017991680145263672, 0.017904640197753906, 0.01803468894958496, 0.01804902458190918, 0.018043903350830077, 0.017953792572021485, 0.01813811111450195, 0.01799782371520996, 0.018018304824829103, 0.017966079711914062, 0.018051071166992186, 0.017977344512939454, 0.01800704002380371, 0.01799679946899414, 0.01804083251953125, 0.017994752883911135, 0.018061311721801757, 0.018028543472290038, 0.018044927597045898, 0.017864704132080078, 0.017921024322509766, 0.017951744079589844, 0.017963008880615236, 0.017930240631103517, 0.01801420783996582, 0.017941503524780272, 0.018001920700073244, 0.017939456939697264, 0.017915903091430666, 0.017966079711914062, 0.017979391098022462, 0.017955839157104494, 0.01800294494628906, 0.017899551391601563, 0.017976287841796876, 0.01799679946899414, 0.01827123260498047, 0.01843097686767578, 0.018742271423339844, 0.01887948799133301, 0.018784255981445314, 0.018868223190307617, 0.01879859161376953, 0.01882009506225586, 0.01982771110534668, 0.02038377571105957, 0.020016096115112306, 0.01902079963684082, 0.018759679794311524, 0.018523136138916017, 0.018774015426635742, 
0.018930688858032226, 0.03848601531982422, 0.019079168319702147, 0.018916351318359375, 0.018967552185058592, 0.01886207962036133, 0.01879347229003906, 0.018959360122680666, 0.01920921516418457, 0.019406848907470704, 0.018989055633544923, 0.018952192306518553, 0.019162111282348633, 0.018876415252685547, 0.01877299118041992, 0.01900748825073242, 0.018959360122680666, 0.019094528198242186, 0.01884364891052246, 0.018897920608520507, 0.018896896362304686, 0.01880473518371582, 0.018931711196899414, 0.018745344161987306, 0.01838591957092285, 0.01842995262145996, 0.01898089599609375, 0.01894806480407715, 0.018896896362304686, 0.018777151107788086, 0.018982847213745116, 0.01923583984375, 0.018766847610473633, 0.01887846374511719, 0.018757631301879883, 0.018769920349121092, 0.019090431213378906, 0.019116031646728517, 0.01899929618835449, 0.019524608612060547, 0.02007961654663086, 0.01907711982727051, 0.01902284812927246, 0.018975744247436522, 0.019171327590942384, 0.018874368667602538, 0.018787328720092773, 0.01904844856262207, 0.018750463485717773, 0.01922662353515625, 0.018884607315063476, 0.018779136657714843, 0.01897881507873535, 0.019174400329589843, 0.019117055892944337, 0.01881907272338867, 0.01883955192565918, 0.019125247955322267, 0.01887846374511719, 0.018976768493652343, 0.018964479446411133, 0.018942975997924806, 0.01901260757446289, 0.019120128631591796, 0.03889766311645508, 0.018888704299926756, 0.01882624053955078, 0.01916316795349121, 0.01887331199645996, 0.01903104019165039, 0.018936832427978514, 0.018971647262573242, 0.018852863311767578, 0.018949119567871094, 0.018922496795654296, 0.018955263137817382, 0.0188221435546875, 0.018824192047119142, 0.018950143814086915, 0.018929664611816405, 0.018951168060302736, 0.01887129592895508, 0.019127296447753905, 0.019130367279052735, 0.018890752792358398, 0.018958335876464845, 0.0188723201751709, 0.019166208267211913, 0.01922150421142578, 0.018877439498901367, 0.018980863571166993, 0.018884607315063476, 0.0188590087890625, 0.018983936309814452, 0.019647487640380858, 0.018966527938842775, 0.018917375564575196, 0.01881907272338867, 0.018967552185058592, 0.018912256240844725, 0.018912256240844725, 0.01900032043457031, 0.018948095321655273, 0.01884979248046875, 0.018891775131225585, 0.018865152359008788, 0.018935808181762694, 0.018865152359008788, 0.018948095321655273, 0.018892799377441406, 0.018840576171875, 0.01882009506225586, 0.018924543380737305, 0.01885798454284668, 0.018974752426147462, 0.018999263763427733, 0.018900991439819336, 0.018874368667602538, 0.018922496795654296, 0.018955263137817382, 0.018931711196899414, 0.01878937530517578, 0.018860031127929687, 0.018972671508789063, 0.018959360122680666, 0.019132448196411134, 0.018904031753540038, 0.03866828918457031, 0.01882521629333496, 0.01878118324279785, 0.018948095321655273, 0.018792448043823243, 0.018868223190307617, 0.018778112411499022, 0.018958368301391602, 0.01888559913635254, 0.0188272647857666, 0.018973695755004884, 0.01886720085144043, 0.018776063919067384, 0.01901055908203125, 0.018993152618408202, 0.018917375564575196, 0.019162111282348633, 0.018950143814086915, 0.018990079879760743, 0.018930688858032226, 0.018890752792358398, 0.018946048736572265, 0.018886655807495118, 0.018832384109497072, 0.018940927505493164, 0.018965503692626954, 0.01929113578796387, 0.018768896102905275, 0.018882560729980468, 0.018877439498901367, 0.01883340835571289, 0.018912256240844725, 0.018927648544311525, 0.01891529655456543, 0.018811904907226562, 0.018910207748413087, 0.01882009506225586, 
0.018910207748413087, 0.018745344161987306, 0.018956287384033203, 0.018868223190307617, 0.018768896102905275, 0.01880575942993164, 0.018736127853393555, 0.018824192047119142, 0.018956287384033203, 0.01884979248046875, 0.018948095321655273, 0.01881907272338867, 0.01880985641479492, 0.01883955192565918, 0.018891775131225585, 0.01883340835571289, 0.01884979248046875, 0.01884569549560547, 0.018924543380737305, 0.018920448303222655, 0.018898944854736328, 0.018893823623657227, 0.019284992218017577, 0.01971609687805176, 0.01968639945983887, 0.01919692802429199, 0.03932364654541016, 0.018924543380737305, 0.019127296447753905, 0.01901875114440918, 0.018907136917114258, 0.018896896362304686, 0.01885798454284668, 0.01881292724609375, 0.01900441551208496, 0.018884607315063476, 0.018885631561279297, 0.01883955192565918, 0.018886688232421876, 0.018969663619995115, 0.018922399520874024, 0.018976768493652343, 0.018914304733276367, 0.01907302474975586, 0.019050495147705078, 0.018923519134521484, 0.018939903259277344, 0.0189040641784668, 0.018877439498901367, 0.01883852767944336, 0.018897920608520507, 0.018779136657714843, 0.018916351318359375, 0.018919424057006837, 0.018974720001220705, 0.018869247436523438, 0.01886617660522461, 0.018947071075439453, 0.018897920608520507, 0.018967552185058592, 0.018948095321655273, 0.018880512237548826, 0.01887846374511719, 0.018902015686035157, 0.01886310386657715, 0.01887846374511719, 0.018799615859985352, 0.018912256240844725, 0.018832384109497072, 0.01883852767944336, 0.018920448303222655, 0.019014656066894533, 0.0188272647857666, 0.018974720001220705, 0.01883852767944336, 0.018874368667602538, 0.018527231216430663, 0.018898944854736328, 0.01881292724609375, 0.01879756736755371, 0.018902015686035157, 0.018908159255981445, 0.018832384109497072, 0.018918399810791017, 0.018792448043823243, 0.018888704299926756, 0.01884880065917969, 0.018927583694458006, 0.018953216552734374, 0.039212032318115236, 0.018920448303222655, 0.018912256240844725, 0.018958335876464845, 0.018861055374145508, 0.018928640365600585, 0.01884774398803711, 0.01884569549560547, 0.01886617660522461, 0.018934783935546876, 0.018942975997924806, 0.01904332733154297, 0.018899967193603515, 0.018918399810791017, 0.01901670455932617, 0.018998271942138673, 0.019309568405151366, 0.02033459281921387, 0.01902899169921875, 0.018954240798950195, 0.019001344680786132, 0.018902015686035157, 0.018910207748413087, 0.018935808181762694, 0.01882828712463379, 0.0188590087890625, 0.019208192825317383, 0.019014656066894533, 0.018882560729980468, 0.019042303085327148, 0.019025951385498046, 0.019065824508666993, 0.018945024490356444, 0.018880512237548826, 0.018861055374145508, 0.018893823623657227, 0.01887539291381836, 0.018876415252685547, 0.01903104019165039, 0.018890752792358398, 0.01881497573852539, 0.01882009506225586, 0.01887948799133301, 0.01878835105895996, 0.018860031127929687, 0.018661376953125, 0.018603008270263673, 0.01880473518371582, 0.018817024230957033, 0.0188723201751709, 0.018905088424682616, 0.018860031127929687, 0.018917375564575196, 0.018925567626953126, 0.018750463485717773, 0.01882009506225586, 0.018754560470581053, 0.0188538875579834, 0.01883443260192871, 0.01882931137084961, 0.01881088066101074, 0.018900991439819336, 0.018924543380737305]",tokens/s,52.13785966068979,,,,, 
4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-70m,EleutherAI/pythia-70m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1279.725568,872.93952,0.0,226.492416,184.397824,s,12,0.17399129676818847,0.014499274730682373,0.0004405777377392672,0.014355583667755127,0.01475712022781372,0.015286664152145383,0.01576770728111267,"[0.01588796806335449, 0.014419008255004882, 0.014310144424438476, 0.01430396842956543, 0.014377471923828124, 0.014377087593078612, 0.01433407974243164, 0.014250847816467285, 0.014390144348144531, 0.01479468822479248, 0.014266719818115234, 0.014279168128967285]",tokens/s,17656.055544507362,kWh,1.7155437431226257e-07,9.399407219055296e-08,3.2474264005518264e-07,5.902910865579981e-07,tokens/kWh,433684339.52262825,MB,1280.053248,872.93952,0.0,226.492416,197.932544,s,12,10.420531616210937,0.8683776346842448,0.009197306451406238,0.8653596801757812,0.8716784362792969,0.8835345275878906,0.8948784387207032,"[0.8977144165039063, 0.866240966796875, 0.8642362670898438, 0.8648695068359376, 0.8719328002929687, 0.8635298461914063, 0.862948974609375, 0.866379638671875, 0.86938916015625, 0.865849853515625, 0.8638052368164062, 0.8636349487304688]",tokens/s,72.54908174012076,kWh,1.0699516429705703e-05,5.862862695252688e-06,1.9223555826850592e-05,3.578593495180898e-05,tokens/kWh,1760468.186309475,,s,755,10.563054601669307,0.013990800796912995,0.001781085498877292,0.013702143669128418,0.014027775764465332,0.014339276885986327,0.028389335021972658,"[0.015265791893005372, 0.014698495864868164, 0.014765055656433105, 0.014334976196289062, 0.014128128051757812, 0.014334976196289062, 0.014543871879577636, 0.014234623908996581, 0.0144650239944458, 0.01454694366455078, 0.014418944358825684, 0.01439027214050293, 0.01425100803375244, 0.014261247634887696, 0.014215167999267577, 0.014323712348937988, 0.014708736419677734, 0.014633983612060546, 0.01434931182861328, 0.01467801570892334, 0.014370816230773926, 0.014229503631591797, 0.014525440216064453, 0.014231552124023437, 0.014211071968078613, 0.014244864463806153, 0.014368767738342286, 0.014796799659729003, 0.014531583786010742, 0.014297087669372559, 0.014436384201049805, 0.014063584327697753, 0.014220288276672363, 0.013916159629821777, 0.014133248329162598, 0.014468095779418945, 0.014696479797363281, 0.014910431861877441, 0.014256128311157227, 0.014102527618408203, 0.01407590389251709, 0.014150655746459961, 0.014071807861328126, 0.013922304153442382, 0.013725728034973144, 0.01371132755279541, 0.013733887672424316, 0.013731840133666993, 0.014045184135437011, 0.014156800270080566, 0.014639103889465332, 0.014141440391540527, 0.013728863716125488, 0.013714336395263671, 0.013676544189453126, 0.013729791641235351, 0.013732864379882812, 0.013691904067993164, 0.013711359977722168, 0.013743103981018067, 0.013743103981018067, 0.013660223960876464, 0.0284968318939209, 0.013691904067993164, 0.01368883228302002, 0.013686783790588379, 0.013670399665832519, 0.01399295997619629, 0.013728768348693847, 
0.013667327880859375, 0.013691904067993164, 0.01365503978729248, 0.013710335731506347, 0.013666303634643554, 0.013699071884155273, 0.013669376373291015, 0.013689855575561523, 0.013734911918640137, 0.013744128227233888, 0.01374617576599121, 0.01366323184967041, 0.013693951606750488, 0.013799424171447755, 0.013740032196044923, 0.013728768348693847, 0.013709312438964843, 0.013709312438964843, 0.01379532814025879, 0.013731840133666993, 0.013713408470153808, 0.013707263946533203, 0.013776896476745605, 0.01368883228302002, 0.013706239700317382, 0.013716480255126954, 0.013789183616638183, 0.014757887840270996, 0.013946911811828614, 0.013715423583984376, 0.013734911918640137, 0.013693951606750488, 0.01369600009918213, 0.013839360237121581, 0.013735936164855958, 0.013712384223937989, 0.013700096130371094, 0.013719552040100098, 0.013707263946533203, 0.013677568435668945, 0.013913087844848633, 0.013707263946533203, 0.013698047637939453, 0.013667360305786132, 0.013723615646362305, 0.013713408470153808, 0.013699071884155273, 0.013731840133666993, 0.013686783790588379, 0.013784064292907714, 0.01374828815460205, 0.013744064331054687, 0.013783040046691895, 0.013717503547668456, 0.013740032196044923, 0.013710335731506347, 0.028412927627563478, 0.013706239700317382, 0.013686783790588379, 0.013731840133666993, 0.013669376373291015, 0.013661184310913087, 0.013645824432373046, 0.013658111572265624, 0.013669376373291015, 0.013628416061401367, 0.013694975852966309, 0.013636608123779297, 0.013705216407775878, 0.013678591728210449, 0.013858816146850587, 0.01386086368560791, 0.01368883228302002, 0.013693951606750488, 0.013660160064697266, 0.01374518394470215, 0.013757408142089844, 0.013689855575561523, 0.013717503547668456, 0.01369600009918213, 0.013725695610046386, 0.013678591728210449, 0.013732895851135254, 0.013722592353820801, 0.0136878080368042, 0.013707263946533203, 0.013698047637939453, 0.013684736251831055, 0.013669376373291015, 0.013684736251831055, 0.01375641632080078, 0.013708288192749024, 0.013765631675720215, 0.013703167915344238, 0.013721599578857421, 0.013702143669128418, 0.013694975852966309, 0.013763584136962891, 0.01369600009918213, 0.013714431762695312, 0.013794303894042969, 0.013682687759399414, 0.013697024345397948, 0.013739007949829102, 0.013708288192749024, 0.013678591728210449, 0.013742079734802246, 0.013710335731506347, 0.013716480255126954, 0.013678591728210449, 0.013706239700317382, 0.013776896476745605, 0.013827072143554688, 0.013702143669128418, 0.013737983703613281, 0.013683712005615235, 0.013726719856262207, 0.013710335731506347, 0.013788160324096679, 0.028406784057617186, 0.013711359977722168, 0.013690879821777344, 0.013694975852966309, 0.013714431762695312, 0.013697024345397948, 0.013701120376586913, 0.01367142391204834, 0.013676544189453126, 0.013768704414367675, 0.01366528034210205, 0.013711423873901367, 0.013736895561218262, 0.013741056442260742, 0.01368064022064209, 0.013705216407775878, 0.013719552040100098, 0.013714431762695312, 0.013728768348693847, 0.013700096130371094, 0.01368883228302002, 0.013689855575561523, 0.013683712005615235, 0.013835264205932616, 0.013717503547668456, 0.013843520164489746, 0.013794239997863769, 0.013710335731506347, 0.013718527793884277, 0.013725695610046386, 0.01368064022064209, 0.013674495697021484, 0.013712384223937989, 0.013718527793884277, 0.013752320289611816, 0.013718527793884277, 0.013736960411071777, 0.01367347240447998, 0.013757439613342285, 0.01370419216156006, 0.013725695610046386, 0.013716480255126954, 0.013707263946533203, 
0.013740032196044923, 0.013692928314208984, 0.013726719856262207, 0.013685759544372558, 0.013721664428710938, 0.013703104019165038, 0.01367142391204834, 0.013768704414367675, 0.01369600009918213, 0.013686783790588379, 0.013691904067993164, 0.01368883228302002, 0.013734911918640137, 0.013686783790588379, 0.01375334358215332, 0.013711359977722168, 0.013979647636413574, 0.013771776199340821, 0.013735936164855958, 0.013831232070922852, 0.028631999969482423, 0.013729824066162109, 0.013675488471984864, 0.01368172836303711, 0.013717439651489258, 0.013657088279724122, 0.01366528034210205, 0.013697024345397948, 0.013677568435668945, 0.01365503978729248, 0.013760512351989745, 0.013660160064697266, 0.01366220760345459, 0.013717503547668456, 0.013717503547668456, 0.013676544189453126, 0.013710335731506347, 0.013657088279724122, 0.013947903633117676, 0.013619199752807617, 0.013640704154968262, 0.01366528034210205, 0.013693951606750488, 0.01365401554107666, 0.013627391815185547, 0.013701120376586913, 0.013742079734802246, 0.013648896217346192, 0.013675552368164062, 0.013744095802307128, 0.01440665626525879, 0.01619865608215332, 0.016926719665527345, 0.014181376457214356, 0.01386188793182373, 0.013735936164855958, 0.01376153564453125, 0.013664256095886231, 0.013693951606750488, 0.013689855575561523, 0.013661184310913087, 0.013678591728210449, 0.013649920463562011, 0.014139391899108887, 0.013820927619934082, 0.013752320289611816, 0.013699071884155273, 0.014027775764465332, 0.01368064022064209, 0.013682687759399414, 0.013675519943237305, 0.013695039749145509, 0.01367750358581543, 0.01376460838317871, 0.014016511917114258, 0.0136878080368042, 0.013678591728210449, 0.01369600009918213, 0.013848575592041015, 0.013717503547668456, 0.0136878080368042, 0.013729791641235351, 0.013838335990905762, 0.028457984924316407, 0.013737983703613281, 0.013685759544372558, 0.013691904067993164, 0.013822976112365723, 0.013697024345397948, 0.013604864120483399, 0.013649920463562011, 0.013724672317504882, 0.013774847984313965, 0.013691904067993164, 0.01366528034210205, 0.013736960411071777, 0.013702143669128418, 0.0136878080368042, 0.013701120376586913, 0.013822976112365723, 0.013692928314208984, 0.013674495697021484, 0.013799424171447755, 0.013674495697021484, 0.01368166446685791, 0.013725695610046386, 0.013814784049987794, 0.013684736251831055, 0.01368883228302002, 0.013848608016967773, 0.013685728073120117, 0.013669376373291015, 0.013674495697021484, 0.01367961597442627, 0.01367244815826416, 0.013739007949829102, 0.01367961597442627, 0.013712384223937989, 0.013697024345397948, 0.013697024345397948, 0.013740032196044923, 0.01376460838317871, 0.013657088279724122, 0.013855744361877441, 0.013742079734802246, 0.013683712005615235, 0.013691904067993164, 0.013676544189453126, 0.013682687759399414, 0.013661184310913087, 0.013661184310913087, 0.013647904396057129, 0.013673439979553222, 0.013668352127075196, 0.013643775939941406, 0.013797375679016113, 0.013658111572265624, 0.013664256095886231, 0.013652031898498534, 0.01367033576965332, 0.013643775939941406, 0.013650943756103515, 0.013666303634643554, 0.013641728401184081, 0.01367142391204834, 0.013726719856262207, 0.028403711318969727, 0.013668352127075196, 0.013674495697021484, 0.01363046360015869, 0.013641728401184081, 0.013643775939941406, 0.01365503978729248, 0.01366220760345459, 0.01366220760345459, 0.01368166446685791, 0.013640704154968262, 0.013685759544372558, 0.013637632369995116, 0.013725695610046386, 0.013702143669128418, 0.013674495697021484, 0.013714431762695312, 
0.013770751953125, 0.013796352386474609, 0.013739007949829102, 0.013711359977722168, 0.01367244815826416, 0.013684736251831055, 0.013736960411071777, 0.01367244815826416, 0.013675519943237305, 0.01368992042541504, 0.01368569564819336, 0.01368166446685791, 0.01367961597442627, 0.0136878080368042, 0.01367961597442627, 0.013676544189453126, 0.013953023910522461, 0.013818880081176758, 0.01379532814025879, 0.013685759544372558, 0.01367347240447998, 0.013747200012207032, 0.013718527793884277, 0.013677568435668945, 0.013668352127075196, 0.013685759544372558, 0.01365503978729248, 0.013649920463562011, 0.013637632369995116, 0.013633536338806153, 0.013768704414367675, 0.01365401554107666, 0.013760543823242188, 0.013644767761230468, 0.013698047637939453, 0.013644800186157227, 0.013639679908752441, 0.013675519943237305, 0.013670399665832519, 0.013651968002319336, 0.013715456008911133, 0.01376972770690918, 0.013659135818481445, 0.01366528034210205, 0.013699071884155273, 0.013642751693725585, 0.02831974411010742, 0.013700096130371094, 0.013639679908752441, 0.013622271537780761, 0.013687840461730957, 0.013710304260253907, 0.013718527793884277, 0.01368166446685791, 0.01366323184967041, 0.013705216407775878, 0.01368064022064209, 0.013686783790588379, 0.013666303634643554, 0.013668352127075196, 0.013682687759399414, 0.013727744102478028, 0.013724672317504882, 0.013721599578857421, 0.013724672317504882, 0.014102527618408203, 0.01380352020263672, 0.013740032196044923, 0.013737983703613281, 0.013920255661010742, 0.013740032196044923, 0.013717503547668456, 0.013843456268310546, 0.01380352020263672, 0.013684736251831055, 0.013757439613342285, 0.013758463859558106, 0.013974528312683105, 0.013858816146850587, 0.01417420768737793, 0.013920255661010742, 0.013936639785766602, 0.01377280044555664, 0.013703167915344238, 0.01368883228302002, 0.013677568435668945, 0.013684736251831055, 0.013694975852966309, 0.013688863754272461, 0.013667296409606934, 0.013786111831665039, 0.013828096389770507, 0.013668352127075196, 0.013664256095886231, 0.013733887672424316, 0.013702143669128418, 0.013656064033508301, 0.013676544189453126, 0.01377894401550293, 0.013708288192749024, 0.0136878080368042, 0.013683712005615235, 0.013735936164855958, 0.01395404815673828, 0.013817855834960938, 0.013650943756103515, 0.013703167915344238, 0.013696063995361328, 0.013751232147216796, 0.028461055755615236, 0.013700096130371094, 0.013645824432373046, 0.014017536163330077, 0.013718527793884277, 0.015047679901123047, 0.014234623908996581, 0.014027775764465332, 0.01408512020111084, 0.014048255920410157, 0.014173184394836426, 0.013982720375061035, 0.014036992073059081, 0.013888511657714844, 0.013721599578857421, 0.01374617576599121, 0.013685759544372558, 0.013708288192749024, 0.013652992248535157, 0.013644800186157227, 0.013687871932983398, 0.013667263984680177, 0.01366220760345459, 0.01367347240447998, 0.01367961597442627, 0.013639679908752441, 0.01368166446685791, 0.013686783790588379, 0.013645824432373046, 0.013686783790588379, 0.01366220760345459, 0.013650943756103515, 0.013632512092590332, 0.013693951606750488, 0.013690879821777344, 0.013845503807067871, 0.013686783790588379, 0.013660160064697266, 0.013715456008911133, 0.01387724781036377, 0.014060544013977052, 0.01405951976776123, 0.013849599838256836, 0.013645824432373046, 0.013682687759399414, 0.013699071884155273, 0.013699071884155273, 0.013707263946533203, 0.013682687759399414, 0.013713408470153808, 0.013652992248535157, 0.013708288192749024, 0.01368166446685791, 0.01368064022064209, 
0.013719552040100098, 0.013873151779174805, 0.01370419216156006, 0.013788224220275879, 0.013749183654785157, 0.013675519943237305, 0.01366329574584961, 0.01380140781402588, 0.014252032279968262, 0.029146112442016602, 0.013622271537780761, 0.013830143928527832, 0.013846528053283692, 0.013924351692199707, 0.013684736251831055, 0.013667327880859375, 0.01367347240447998, 0.01363865566253662, 0.01366323184967041, 0.013697024345397948, 0.013644800186157227, 0.013669376373291015, 0.01386086368560791, 0.013948927879333496, 0.013683712005615235, 0.013719552040100098, 0.013699071884155273, 0.013691904067993164, 0.013702143669128418, 0.013697024345397948, 0.013853728294372558, 0.013726688385009766, 0.013720576286315917, 0.013678591728210449, 0.01369600009918213, 0.013735936164855958, 0.013697024345397948, 0.013715456008911133, 0.013755392074584961, 0.013735936164855958, 0.013721599578857421, 0.013707263946533203, 0.013883392333984374, 0.013845503807067871, 0.013723648071289063, 0.013712384223937989, 0.013705216407775878, 0.01368883228302002, 0.013685759544372558, 0.013721599578857421, 0.013728768348693847, 0.013697024345397948, 0.013751296043395997, 0.013793279647827148, 0.013677568435668945, 0.01369600009918213, 0.013702143669128418, 0.013920255661010742, 0.013789183616638183, 0.013728768348693847, 0.013712384223937989, 0.01368166446685791, 0.013733920097351074, 0.013666272163391113, 0.01387724781036377, 0.013720576286315917, 0.013702143669128418, 0.013740032196044923, 0.013721599578857421, 0.013722623825073242, 0.013712384223937989, 0.01376153564453125, 0.02837708854675293, 0.01365401554107666, 0.01364684772491455, 0.013660160064697266, 0.013664256095886231, 0.013692928314208984, 0.013957119941711426, 0.01405951976776123, 0.013983743667602539, 0.01367961597442627, 0.013649920463562011, 0.013858880043029785, 0.013722559928894044, 0.013641728401184081, 0.013668352127075196, 0.013664256095886231, 0.013723648071289063, 0.01365503978729248, 0.013669376373291015, 0.01367142391204834, 0.01370419216156006, 0.01368166446685791, 0.01367142391204834, 0.013675519943237305, 0.013628416061401367, 0.013627391815185547, 0.013661184310913087, 0.013818880081176758, 0.013710335731506347, 0.013647871971130371, 0.013670399665832519, 0.01368064022064209, 0.01367347240447998, 0.013683712005615235, 0.01368883228302002, 0.013701120376586913, 0.013677568435668945, 0.013657088279724122, 0.013692928314208984, 0.013644800186157227, 0.013676544189453126, 0.013677568435668945, 0.01380352020263672, 0.013749247550964355, 0.013674495697021484, 0.013695039749145509, 0.013658047676086426, 0.013690879821777344, 0.01367244815826416, 0.013657088279724122, 0.013640704154968262, 0.013657088279724122, 0.013793279647827148, 0.013695008277893066, 0.013856736183166504, 0.013647871971130371, 0.013661215782165528, 0.013750240325927735, 0.013722623825073242, 0.013768704414367675, 0.013710335731506347, 0.013717503547668456, 0.01366220760345459, 0.028322816848754883, 0.013648896217346192, 0.013686783790588379, 0.01369600009918213, 0.013699071884155273, 0.0136878080368042, 0.013691904067993164, 0.013702143669128418, 0.013697024345397948, 0.013683712005615235, 0.013700096130371094, 0.013693951606750488, 0.013722623825073242, 0.013705216407775878, 0.013691935539245605, 0.013717472076416016, 0.013724672317504882, 0.013700096130371094, 0.013664256095886231, 0.013639679908752441, 0.013757439613342285, 0.013656064033508301, 0.013667327880859375, 0.01366431999206543, 0.013849535942077636, 0.013924351692199707, 0.013633567810058594, 0.013722592353820801, 
0.013668352127075196, 0.013906944274902343, 0.013789183616638183, 0.013662240028381348, 0.013687775611877442, 0.013670399665832519, 0.01367347240447998, 0.01366528034210205, 0.013657088279724122, 0.013661184310913087, 0.013678591728210449, 0.013684736251831055, 0.013658143997192384, 0.013722592353820801, 0.01366528034210205, 0.013637632369995116, 0.01367347240447998, 0.01365503978729248, 0.01367347240447998, 0.013744128227233888, 0.01370419216156006, 0.013693951606750488, 0.013711359977722168, 0.01368064022064209, 0.01368064022064209, 0.013691904067993164, 0.01369600009918213, 0.013724672317504882, 0.013697024345397948, 0.01367142391204834, 0.013682687759399414, 0.01365503978729248, 0.013683712005615235, 0.013806591987609864, 0.013955072402954101]",tokens/s,71.4755369986145,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm-20b,internlm/internlm-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,4499.935232,14621.868032,0.0,13975.420928,13365.937152,s,10,16.410234252929687,1.6410234252929687,0.0022480805464608366,1.6403901367187501,1.6418083374023436,1.6447224060058594,1.6470536608886719,"[1.647636474609375, 1.63996875, 1.6411607666015624, 1.640802978515625, 1.64035791015625, 1.6397760009765625, 1.6397884521484376, 1.6398060302734374, 1.6405145263671874, 1.64042236328125]",tokens/s,156.00021063337155,kWh,1.9377978377872043e-05,1.0617021217130968e-05,9.108710064739678e-05,0.00012108210024239978,tokens/kWh,2114267.9181109504,MB,4499.935232,14621.868032,0.0,13975.420928,13814.413824,s,10,975.3211015625,97.53211015625,0.011202982505412533,97.52955078125,97.54984375,97.55100390625,97.55193203125,"[97.540578125, 97.52715625, 97.522640625, 97.5219296875, 97.5304609375, 97.51653125, 97.5521640625, 97.5495859375, 97.5314140625, 97.528640625]",tokens/s,0.6459411151780855,kWh,0.0011513337257504463,0.0006310327948099438,0.005346427777138807,0.007128794297699197,tokens/kWh,8837.399056434146,,s,629,988.5350541992196,1.5715978604121124,0.19496209506894618,1.548072998046875,1.5490094970703125,1.549420166015625,3.189316650390625,"[1.547826171875, 1.5488624267578126, 1.5484969482421875, 1.5483443603515625, 1.5481630859375, 1.5493089599609375, 1.548452880859375, 1.5482459716796875, 1.5481619873046875, 1.5497840576171875, 1.54772998046875, 1.5493929443359375, 1.547630615234375, 1.5480760498046875, 1.547937744140625, 1.5482552490234376, 1.5470531005859376, 1.5489443359375, 1.5480074462890625, 1.548041259765625, 1.5472659912109374, 1.549042724609375, 1.5482132568359375, 1.5479869384765625, 1.54851123046875, 1.5473919677734376, 1.5474892578125, 1.547378662109375, 1.548716064453125, 1.548441650390625, 1.5491409912109375, 1.5472435302734375, 1.5476378173828125, 1.5480125732421874, 1.5484580078125, 1.5489659423828126, 1.5496949462890626, 1.5483074951171876, 1.5473623046875, 1.547552734375, 1.547135009765625, 1.548559326171875, 1.5482398681640626, 1.5498638916015626, 1.54827880859375, 1.5483668212890624, 1.5489197998046875, 1.548549072265625, 
1.54787939453125, 1.547683837890625, 1.549091796875, 1.5491134033203124, 1.548537841796875, 1.548310546875, 1.547894775390625, 1.54776171875, 1.5491932373046875, 1.5472711181640626, 1.547672607421875, 1.5473602294921875, 1.5482440185546875, 1.548180419921875, 3.190286376953125, 1.5480146484375, 1.5486883544921874, 1.5482091064453125, 1.54775244140625, 1.5473919677734376, 1.547894775390625, 1.5473858642578124, 1.548252197265625, 1.5479337158203126, 1.54754052734375, 1.5482122802734375, 1.54810986328125, 1.5476787109375, 1.5477442626953124, 1.54800634765625, 1.5478446044921874, 1.5483709716796874, 1.5485460205078125, 1.5486474609375, 1.5467080078125, 1.546982421875, 1.5480340576171876, 1.5470633544921875, 1.5475240478515624, 1.5482020263671874, 1.5471483154296874, 1.5476029052734375, 1.548863525390625, 1.5478087158203124, 1.5482347412109374, 1.54766845703125, 1.54899658203125, 1.5479388427734375, 1.547747314453125, 1.547916259765625, 1.5490928955078125, 1.548327880859375, 1.548775390625, 1.5474073486328126, 1.547236328125, 1.5476397705078124, 1.5474749755859376, 1.5477001953125, 1.5486075439453124, 1.54838427734375, 1.5483780517578125, 1.5483616943359375, 1.5482193603515626, 1.548074951171875, 1.547906005859375, 1.549001708984375, 1.5483565673828126, 1.5475179443359375, 1.5475548095703124, 1.5477432861328124, 1.54893310546875, 1.54842724609375, 1.5487344970703125, 1.5480279541015625, 1.5475323486328125, 1.5480032958984375, 1.5488101806640624, 3.191152587890625, 1.5476182861328125, 1.548537841796875, 1.5480074462890625, 1.54785791015625, 1.548368896484375, 1.5487181396484375, 1.548142578125, 1.5480279541015625, 1.547535400390625, 1.5482972412109375, 1.54859521484375, 1.5482921142578125, 1.5488890380859375, 1.5477381591796875, 1.5481619873046875, 1.54773193359375, 1.54867919921875, 1.547737060546875, 1.5486033935546875, 1.5482552490234376, 1.5477698974609375, 1.5484517822265624, 1.5468912353515625, 1.5473387451171876, 1.5476397705078124, 1.5484375, 1.547588623046875, 1.5468543701171875, 1.54747802734375, 1.547462646484375, 1.5476142578125, 1.5486832275390625, 1.5478487548828126, 1.54682373046875, 1.547431884765625, 1.54697216796875, 1.5476439208984376, 1.547420654296875, 1.547546630859375, 1.5470274658203125, 1.5474892578125, 1.547757568359375, 1.547947021484375, 1.5482930908203125, 1.549538330078125, 1.547505615234375, 1.548253173828125, 1.549297607421875, 1.547484130859375, 1.548220458984375, 1.548078125, 1.547826171875, 1.54688916015625, 1.5470919189453125, 1.5475538330078125, 1.54765625, 1.5476009521484375, 1.5491522216796876, 1.5480196533203125, 1.5481773681640625, 1.5486719970703124, 1.54946044921875, 3.19001904296875, 1.5467274169921874, 1.5485091552734376, 1.54712060546875, 1.5472425537109376, 1.548200927734375, 1.5474647216796875, 1.5484794921875, 1.5470428466796875, 1.5474134521484375, 1.5471728515625, 1.54741455078125, 1.547334716796875, 1.5476378173828125, 1.5483443603515625, 1.5476234130859374, 1.5473817138671875, 1.548241943359375, 1.548495849609375, 1.5476910400390624, 1.5492137451171875, 1.5486300048828125, 1.547925537109375, 1.5481497802734374, 1.5477288818359376, 1.547779052734375, 1.5494564208984376, 1.5475230712890624, 1.547504638671875, 1.5472803955078125, 1.5474217529296874, 1.54842724609375, 1.547715576171875, 1.54804736328125, 1.54720458984375, 1.5474442138671876, 1.5468114013671874, 1.548011474609375, 1.5478446044921874, 1.5482470703125, 1.548304443359375, 1.5482255859375, 1.549054931640625, 1.548583984375, 1.5483463134765625, 1.5490682373046876, 1.5485091552734376, 
1.5480699462890626, 1.5479337158203126, 1.5475302734375, 1.5479091796875, 1.548273681640625, 1.5492208251953126, 1.547388916015625, 1.54747802734375, 1.5476285400390626, 1.54806787109375, 1.548291015625, 1.5489659423828126, 1.5477606201171874, 1.5477279052734374, 1.5477841796875, 1.5487047119140624, 3.189357666015625, 1.54832177734375, 1.54777294921875, 1.5475640869140626, 1.548190673828125, 1.54800439453125, 1.54889111328125, 1.548938232421875, 1.547832275390625, 1.5472568359375, 1.5471380615234376, 1.5473438720703125, 1.5473499755859375, 1.548291015625, 1.5484302978515625, 1.547525146484375, 1.54766748046875, 1.54760400390625, 1.54752001953125, 1.547726806640625, 1.54796435546875, 1.5488388671875, 1.5483873291015624, 1.54832177734375, 1.54785791015625, 1.548337158203125, 1.548981201171875, 1.5488040771484375, 1.5480023193359376, 1.547864013671875, 1.5482235107421876, 1.54768994140625, 1.5485205078125, 1.5483873291015624, 1.5479071044921875, 1.54815380859375, 1.5473756103515626, 1.5484302978515625, 1.5488572998046874, 1.54754248046875, 1.548105712890625, 1.5480648193359374, 1.548222412109375, 1.5480238037109375, 1.547357177734375, 1.5477821044921876, 1.5485142822265625, 1.54811181640625, 1.5482706298828126, 1.547442138671875, 1.547783203125, 1.548359619140625, 1.54857373046875, 1.5488572998046874, 1.5484447021484375, 1.5478978271484376, 1.5483546142578124, 1.5478446044921874, 1.54789990234375, 1.548568603515625, 1.5481129150390625, 1.5489515380859376, 1.5481129150390625, 3.18936376953125, 1.5473162841796875, 1.548347412109375, 1.5468809814453126, 1.5473695068359374, 1.5474237060546876, 1.5484447021484375, 1.548760009765625, 1.5489996337890626, 1.547483154296875, 1.547210693359375, 1.5470223388671875, 1.5469271240234375, 1.5475926513671876, 1.54830029296875, 1.5476080322265624, 1.547652099609375, 1.5482685546875, 1.547872314453125, 1.5475855712890625, 1.5479234619140625, 1.5485009765625, 1.54821630859375, 1.54749853515625, 1.54756298828125, 1.5474810791015625, 1.5483934326171875, 1.5483914794921876, 1.5475958251953126, 1.5481610107421875, 1.5478609619140624, 1.5482193603515626, 1.5480648193359374, 1.5481988525390624, 1.5481087646484375, 1.5474114990234376, 1.54781494140625, 1.5477237548828124, 1.5477821044921876, 1.5474083251953126, 1.5495556640625, 1.5478026123046875, 1.54785693359375, 1.547255859375, 1.54735302734375, 1.5477073974609374, 1.547969482421875, 1.5477288818359376, 1.5476070556640624, 1.5480606689453125, 1.548168212890625, 1.5479029541015625, 1.5480186767578126, 1.5485255126953126, 1.5468739013671875, 1.546857421875, 1.54753125, 1.5483514404296874, 1.54785888671875, 1.548099609375, 1.54840478515625, 1.54806884765625, 1.5480648193359374, 3.188536376953125, 1.547326416015625, 1.5486146240234375, 1.5495721435546874, 1.5473060302734376, 1.5487816162109376, 1.549453369140625, 1.5489608154296874, 1.5496785888671876, 1.5480616455078124, 1.54798486328125, 1.5475947265625, 1.549253662109375, 1.5481968994140625, 1.548053466796875, 1.549401123046875, 1.5481220703125, 1.547989990234375, 1.548347412109375, 1.5480863037109376, 1.548441650390625, 1.5491942138671875, 1.5479951171875, 1.5477596435546874, 1.5479132080078124, 1.5479019775390626, 1.5476695556640625, 1.5486505126953125, 1.548642333984375, 1.5483924560546876, 1.548464111328125, 1.5485399169921874, 1.5489935302734374, 1.5492823486328124, 1.549126708984375, 1.5488082275390624, 1.5485020751953125, 1.5481436767578125, 1.5480247802734375, 1.5473438720703125, 1.5484078369140626, 1.5497154541015625, 1.548389404296875, 1.548622802734375, 
1.5483453369140625, 1.5479080810546875, 1.548316650390625, 1.548980224609375, 1.5480084228515625, 1.5479080810546875, 1.5483177490234374, 1.54800634765625, 1.5485972900390625, 1.5479234619140625, 1.5486607666015626, 1.548347412109375, 1.5482081298828125, 1.547925537109375, 1.548674072265625, 1.548291015625, 1.55034423828125, 1.54886865234375, 1.5481036376953126, 3.189357666015625, 1.5474586181640626, 1.5491717529296876, 1.5481129150390625, 1.548072998046875, 1.5476490478515625, 1.5485419921875, 1.5480074462890625, 1.54784765625, 1.5483238525390626, 1.547341796875, 1.547925537109375, 1.547810791015625, 1.5478056640625, 1.5477412109375, 1.54859521484375, 1.5481068115234375, 1.548200927734375, 1.547937744140625, 1.547672607421875, 1.547968505859375, 1.5481712646484376, 1.5496058349609374, 1.54743603515625, 1.5484375, 1.548396484375, 1.5478035888671875, 1.5489986572265626, 1.54951171875, 1.547946044921875, 1.5478077392578125, 1.5477493896484376, 1.5488133544921876, 1.5480872802734376, 1.5481773681640625, 1.54931103515625, 1.5479542236328125, 1.5490611572265625, 1.547883544921875, 1.547809814453125, 1.5479930419921875, 1.549116455078125, 1.5483084716796875, 1.54863720703125, 1.54834130859375, 1.548610595703125, 1.5488941650390624, 1.5497103271484376, 1.5490672607421876, 1.5479500732421876, 1.5488173828125, 1.5487139892578126, 1.5484384765625, 1.5487242431640624, 1.5489197998046875, 1.5499970703125, 1.5479736328125, 1.549170654296875, 1.548205078125, 1.5482030029296876, 1.54952294921875, 1.548652587890625, 1.5488470458984376, 3.190570068359375, 1.5475732421875, 1.54899560546875, 1.5490723876953125, 1.548262451171875, 1.5477442626953124, 1.5488890380859375, 1.5484302978515625, 1.5486300048828125, 1.548895263671875, 1.5487139892578126, 1.54817333984375, 1.548970947265625, 1.5481988525390624, 1.5474852294921875, 1.548536865234375, 1.5487886962890625, 1.548294189453125, 1.54817333984375, 1.547431884765625, 1.54737353515625, 1.5477801513671876, 1.5483740234375, 1.547725830078125, 1.547284423828125, 1.5479593505859375, 1.5474329833984375, 1.5482255859375, 1.54836376953125, 1.54859521484375, 1.5475343017578125, 1.5487181396484375, 1.548205078125, 1.5478056640625, 1.5484302978515625, 1.54993359375, 1.5491451416015625, 1.548178466796875, 1.5477718505859375, 1.5477421875, 1.5473060302734376, 1.548078125, 1.548304443359375, 1.5474052734375, 1.5479808349609374, 1.547030517578125, 1.547672607421875, 1.5480975341796874, 1.5481231689453125, 1.547429931640625, 1.5478343505859375, 1.5476173095703125, 1.5473643798828125, 1.547039794921875, 1.5472691650390624, 1.5484384765625, 1.5475506591796875, 1.548432373046875, 1.54777294921875, 1.5470867919921876, 1.54768896484375, 1.5496007080078125, 1.548421142578125, 3.189211181640625, 1.5468963623046874, 1.547869140625, 1.54876416015625, 1.5484989013671875, 1.5492301025390625, 1.5475916748046874, 1.546978271484375, 1.5469158935546874, 1.54728759765625, 1.5486658935546875, 1.5481077880859375, 1.5487529296875, 1.54709912109375, 1.547663330078125, 1.5471912841796875, 1.54923828125, 1.54927001953125, 1.54796435546875, 1.5475865478515625, 1.5476448974609376, 1.547410400390625, 1.548801025390625, 1.5483770751953125, 1.547400146484375, 1.5478026123046875, 1.54826953125, 1.547557861328125, 1.547953125, 1.549897705078125, 1.5482357177734376, 1.5481138916015624, 1.547904052734375, 1.5478404541015625, 1.5473480224609375, 1.54821630859375, 1.5495045166015624, 1.547509765625, 1.5476746826171874, 1.546893310546875, 1.54714013671875, 1.5480177001953126, 1.548304443359375, 
1.547969482421875, 1.547404296875, 1.5485450439453126, 1.547778076171875, 1.5482039794921876, 1.549432861328125, 1.54890234375, 1.5484302978515625, 1.5484652099609375, 1.547947998046875, 1.5474166259765625, 1.547483154296875, 1.5496939697265626, 1.5482757568359375, 1.547537353515625, 1.5478927001953124, 1.54752001953125, 1.547925537109375, 1.5486146240234375, 1.5490406494140625]",tokens/s,0.6362950887053097,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Meta-Llama-3-8B,meta-llama/Meta-Llama-3-8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1644.60544,7583.82592,0.0,6937.378816,6314.17344,s,10,6.228646179199219,0.6228646179199219,0.003273798456462667,0.6227339782714845,0.6241754699707032,0.6279462249755859,0.6309628289794922,"[0.6317169799804687, 0.6232648315429687, 0.6204146728515625, 0.6228704833984375, 0.6219456176757813, 0.6190134887695312, 0.6202999877929688, 0.6233375244140625, 0.6231851196289062, 0.6225974731445313]",tokens/s,411.0042417482646,kWh,7.31462190548579e-06,4.008082885946565e-06,3.373723287211802e-05,4.505993766355038e-05,tokens/kWh,5681321.663413707,MB,1644.60544,7583.82592,0.0,6937.378816,6464.046592,s,10,368.19280468750003,36.81928046875,0.018694382665316225,36.824119140625,36.835055078125,36.8438986328125,36.8509734765625,"[36.82331640625, 36.826859375, 36.8276015625, 36.8527421875, 36.83308984375, 36.78996875, 36.79416796875, 36.7968203125, 36.823703125, 36.82453515625]",tokens/s,1.7110600532639584,kWh,0.00043422340434458523,0.00023799224410691,0.0019653583664030845,0.00263757401485458,tokens/kWh,23885.58563482566,,s,629,373.2052468261721,0.5933310760352495,0.07394405347043131,0.5842933959960938,0.5858826293945313,0.58612470703125,1.20537375,"[0.5849108276367188, 0.5836380004882813, 0.5857188110351562, 0.5863209228515625, 0.5851494140625, 0.5851401977539062, 0.5856481323242188, 0.5834137573242187, 0.584342529296875, 0.58520166015625, 0.58519140625, 0.5853235473632813, 0.5863526611328125, 0.5858436889648437, 0.5854269409179688, 0.5851904296875, 0.5858662109375, 0.5851248779296875, 0.58393701171875, 0.586082275390625, 0.5860935668945313, 0.5858037719726562, 0.5858048095703124, 0.5849139404296875, 0.5841203002929688, 0.5845104370117188, 0.5843281860351562, 0.58593896484375, 0.5835950317382812, 0.5853214721679687, 0.5840045776367188, 0.58496923828125, 0.5845125122070313, 0.5841981201171875, 0.583709716796875, 0.583604248046875, 0.585080810546875, 0.5840435180664062, 0.5833502807617188, 0.583204833984375, 0.5833809814453125, 0.583488525390625, 0.5837742309570313, 0.5834803466796875, 0.5833226318359375, 0.5833451538085938, 0.5854525146484375, 0.5855877075195313, 0.5852989501953125, 0.5850931396484375, 0.5837567749023438, 0.5835980834960938, 0.583244873046875, 0.5846691284179687, 0.5844193115234375, 0.5830215454101563, 0.58328369140625, 0.5829970092773438, 0.5831393432617188, 0.5830184936523437, 0.583066650390625, 0.5831342163085937, 1.2092733154296875, 0.5848668212890625, 0.5853839721679688, 
0.5835458374023438, 0.583478271484375, 0.5848524780273437, 0.5844121704101563, 0.5853265991210937, 0.5850194091796875, 0.5835120849609375, 0.5833359375, 0.5842094116210937, 0.5853716430664062, 0.5833635864257812, 0.583319580078125, 0.5834424438476562, 0.5833430786132813, 0.584900634765625, 0.5847019653320312, 0.5855590209960938, 0.585365478515625, 0.585439208984375, 0.5842821044921875, 0.5841654052734375, 0.5858457641601562, 0.5847449340820312, 0.585164794921875, 0.5848309936523437, 0.584369140625, 0.58404248046875, 0.5838244018554688, 0.583257080078125, 0.5842994995117188, 0.5846712036132813, 0.5851217651367188, 0.5858897705078125, 0.5863434448242187, 0.5853060913085938, 0.5856419677734375, 0.5848340454101563, 0.5839616088867188, 0.5841131591796875, 0.5834690551757813, 0.5832816772460937, 0.5835386962890625, 0.5836953735351562, 0.5837783203125, 0.5832489013671875, 0.5833348999023438, 0.5838141479492187, 0.58338818359375, 0.5840414428710937, 0.5849354248046875, 0.5850501098632812, 0.5841889038085938, 0.5833840942382813, 0.5846098022460937, 0.5854556274414062, 0.5855375366210938, 0.586071044921875, 0.586082275390625, 0.5858805541992187, 0.5854515380859375, 1.2056739501953124, 0.5838673706054688, 0.5836922607421875, 0.5847716064453125, 0.5860086059570313, 0.586287109375, 0.5854852905273438, 0.5859491577148438, 0.5838069458007813, 0.5842606201171875, 0.5834219360351562, 0.58568701171875, 0.5857372436523437, 0.5852446899414062, 0.58332568359375, 0.5833123779296875, 0.5834485473632812, 0.5832437744140625, 0.583193603515625, 0.5835929565429687, 0.5832130737304687, 0.5838284912109375, 0.58541259765625, 0.5862522583007812, 0.5836943359375, 0.5834444580078125, 0.5840834350585937, 0.5839165649414062, 0.5839165649414062, 0.585080810546875, 0.583625732421875, 0.5833779296875, 0.583635986328125, 0.5836810302734375, 0.585175048828125, 0.5848340454101563, 0.5836544189453124, 0.5838428344726563, 0.5849999389648437, 0.5866905517578125, 0.5863915405273438, 0.5861068725585937, 0.5854617309570312, 0.58519140625, 0.585206787109375, 0.5851094970703125, 0.5855795288085938, 0.5858048095703124, 0.5847920532226563, 0.58595947265625, 0.5849281616210937, 0.5841448974609375, 0.5848914184570313, 0.5851996459960938, 0.5850060424804687, 0.585902099609375, 0.5855529174804688, 0.5846098022460937, 0.5835745239257812, 0.5832652587890625, 0.5837926635742188, 0.5831690063476562, 0.5832499389648438, 1.207994384765625, 0.585818115234375, 0.5860628662109375, 0.5862369384765626, 0.5856450805664063, 0.5840588989257812, 0.5840947265625, 0.5850921020507812, 0.5846712036132813, 0.584900634765625, 0.5842124633789062, 0.585068603515625, 0.5859398803710938, 0.5848207397460937, 0.585101318359375, 0.5848822021484374, 0.5862144165039063, 0.5845022583007813, 0.58452685546875, 0.5850029907226563, 0.5852498168945313, 0.58498046875, 0.5852426147460937, 0.584975341796875, 0.5861365966796875, 0.5855333862304688, 0.5858826293945313, 0.5837987670898438, 0.5849784545898438, 0.584690673828125, 0.585865234375, 0.585797607421875, 0.58403125, 0.5845575561523437, 0.583736328125, 0.5857874145507812, 0.584921142578125, 0.5861068115234375, 0.5857382202148438, 0.5839708251953125, 0.5849497680664062, 0.5862993774414063, 0.5846947631835937, 0.5865574340820312, 0.5856399536132812, 0.5843414916992188, 0.5835376586914063, 0.5834024658203125, 0.5844940795898438, 0.5834383544921875, 0.584827880859375, 0.5836144409179688, 0.5839237060546875, 0.5843804321289062, 0.5840660400390625, 0.5849600219726563, 0.5857423095703125, 0.583419921875, 0.5841243896484375, 
0.5841254272460937, 0.5858836669921875, 0.586166259765625, 0.5862020874023437, 1.2080977783203124, 0.5857433471679687, 0.5837967529296875, 0.5849077758789063, 0.5842442016601562, 0.5845339965820312, 0.5844049682617187, 0.584521728515625, 0.585860107421875, 0.5852057495117188, 0.5834055786132812, 0.5845022583007813, 0.5843896484375, 0.584616943359375, 0.5853900756835938, 0.585218017578125, 0.584774658203125, 0.583119873046875, 0.5841193237304687, 0.583546875, 0.5849262084960938, 0.5859512329101563, 0.5857454223632812, 0.5856983032226563, 0.5860689697265625, 0.584722412109375, 0.585270263671875, 0.5859860229492188, 0.5858826293945313, 0.5849948120117188, 0.5863117065429687, 0.5867387084960938, 0.5861365966796875, 0.583546875, 0.583605224609375, 0.5836113891601562, 0.58324169921875, 0.5857771606445312, 0.5841275024414062, 0.583731201171875, 0.5836175537109375, 0.5853092041015625, 0.5840271606445312, 0.583151611328125, 0.5838561401367187, 0.5832478637695313, 0.5832324829101563, 0.5840947265625, 0.5859563598632812, 0.5858385620117188, 0.5836267700195312, 0.5830523071289062, 0.58344140625, 0.5830737915039063, 0.5846456298828125, 0.58616015625, 0.5856010131835937, 0.584089599609375, 0.5855467529296875, 0.5856133422851563, 0.5847756958007813, 0.583384033203125, 0.583277587890625, 1.204601806640625, 0.5862328491210937, 0.5855662231445312, 0.5857454223632812, 0.585776123046875, 0.5854013671875, 0.5835397338867188, 0.5845718994140625, 0.584806396484375, 0.5833901977539062, 0.58340966796875, 0.5848545532226562, 0.5836718139648438, 0.583673828125, 0.5833185424804688, 0.5834208984375, 0.5831044921875, 0.58376806640625, 0.5851791381835938, 0.584932373046875, 0.5834536743164063, 0.58336767578125, 0.5833983764648437, 0.583994384765625, 0.5848350830078125, 0.5854105834960938, 0.5858877563476562, 0.584690673828125, 0.5837537231445312, 0.5838837890625, 0.5847900390625, 0.5848023071289062, 0.5842175903320312, 0.5831762084960938, 0.5833820190429687, 0.5859891357421875, 0.5835069580078125, 0.5831127319335937, 0.5831157836914063, 0.5827809448242187, 0.58313623046875, 0.58292431640625, 0.5838295288085937, 0.5834178466796875, 0.5833871459960938, 0.5833328857421874, 0.5838223266601562, 0.5833430786132813, 0.58349462890625, 0.5832028198242187, 0.583478271484375, 0.5832816772460937, 0.5838448486328125, 0.58444287109375, 0.5835448608398438, 0.583488525390625, 0.5832765502929688, 0.5831597900390625, 0.5829530029296875, 0.5834905395507812, 0.5833994140625, 0.583025634765625, 0.5832243041992188, 1.203841064453125, 0.5830482177734375, 0.58326220703125, 0.5829181518554688, 0.5830656127929688, 0.5832324829101563, 0.5837772827148437, 0.5836277465820312, 0.5839247436523437, 0.5832898559570312, 0.5831966552734374, 0.5831823120117188, 0.5834014892578125, 0.5832069091796875, 0.58324169921875, 0.5832232666015625, 0.5835601806640625, 0.5835192260742188, 0.5839534301757813, 0.583889892578125, 0.5849528198242188, 0.583784423828125, 0.5851146240234375, 0.584163330078125, 0.5861038208007813, 0.5857628173828126, 0.5840619506835938, 0.5834321899414062, 0.5833062133789062, 0.5850715942382813, 0.5838827514648437, 0.5832939453125, 0.5836646118164063, 0.5837035522460937, 0.5832540283203125, 0.58330419921875, 0.5831854248046875, 0.5831260375976562, 0.5833052368164062, 0.5834301147460937, 0.5840445556640625, 0.5837404174804688, 0.5842206420898437, 0.5848719482421875, 0.5832294311523437, 0.5834393310546875, 0.5849579467773437, 0.585996337890625, 0.5837516479492187, 0.5843865356445312, 0.5841224365234375, 0.58503466796875, 0.5842933959960938, 
0.5842514038085938, 0.5854228515625, 0.5845718994140625, 0.5853296508789062, 0.58530712890625, 0.585987060546875, 0.5857822875976563, 0.5840445556640625, 0.5849928588867187, 0.5844633178710937, 1.2071761474609375, 0.5841336059570312, 0.5841234130859375, 0.5854689331054688, 0.5842175903320312, 0.5836083374023437, 0.5841305541992188, 0.5834137573242187, 0.583314453125, 0.5843251342773438, 0.584774658203125, 0.5859430541992188, 0.58572802734375, 0.5840137939453125, 0.583277587890625, 0.583404541015625, 0.58437939453125, 0.5849334106445313, 0.5850767211914063, 0.5838622436523437, 0.5835120849609375, 0.583267333984375, 0.5832949829101562, 0.5855610961914063, 0.583498779296875, 0.5834619140625, 0.58427392578125, 0.584437744140625, 0.5862256469726562, 0.5834854125976563, 0.5835243530273437, 0.5841622924804688, 0.58366259765625, 0.584537109375, 0.5853388671875, 0.5849989013671875, 0.5844951171875, 0.5839963989257813, 0.5834158325195312, 0.583077880859375, 0.5831526489257812, 0.5830082397460937, 0.5828720703125, 0.5832632446289062, 0.5835222778320313, 0.5850848999023438, 0.5834332275390625, 0.58313623046875, 0.5834127197265625, 0.58305126953125, 0.5829222412109375, 0.5836380004882813, 0.5834926147460937, 0.5847183227539062, 0.58433740234375, 0.5834075927734375, 0.583525390625, 0.585175048828125, 0.5840414428710937, 0.5833728637695312, 0.5833543090820312, 0.5860372314453125, 0.58665673828125, 1.209406494140625, 0.5842022705078125, 0.5846261596679687, 0.5852323608398438, 0.5844152221679687, 0.5856993408203125, 0.5855590209960938, 0.5855303955078125, 0.5856942138671875, 0.5849548950195312, 0.584184814453125, 0.583784423828125, 0.5838602905273438, 0.583943115234375, 0.5855211791992188, 0.5860454711914063, 0.5839083251953125, 0.5838970947265625, 0.58345166015625, 0.584595458984375, 0.5834035034179688, 0.583773193359375, 0.5848862915039063, 0.5850439453125, 0.5842882690429687, 0.5836083374023437, 0.5860198364257813, 0.5854996337890624, 0.5847982177734375, 0.58365234375, 0.5836318969726563, 0.5851351318359375, 0.5849569091796875, 0.5841541137695313, 0.5852262573242187, 0.5854945068359375, 0.58353662109375, 0.5831260375976562, 0.5857720336914063, 0.5833697509765625, 0.5847398681640625, 0.5836799926757813, 0.5841725463867188, 0.5835213012695313, 0.5834793090820313, 0.5834086303710937, 0.5851555786132813, 0.5856593627929687, 0.586039306640625, 0.586672119140625, 0.5860044555664062, 0.583857177734375, 0.5836093139648437, 0.5832765502929688, 0.583372802734375, 0.5853153076171875, 0.5847501220703125, 0.5838038330078125, 0.5834752197265625, 0.5835591430664062, 0.5838551635742187, 0.5840281372070313, 0.5836544189453124, 1.2069908447265625, 0.5845330200195312, 0.5855457153320313, 0.58528564453125, 0.5856204833984375, 0.5856962280273438, 0.5844951171875, 0.585575439453125, 0.5854146728515625, 0.5844561767578125, 0.5846773681640625, 0.5847982177734375, 0.5842196655273437, 0.5833287963867188, 0.5843486938476562, 0.5833656616210937, 0.5831823120117188, 0.5832847290039063, 0.5848411865234375, 0.585359375, 0.5844951171875, 0.5839882202148438, 0.58340966796875, 0.5836564331054688, 0.5833912353515625, 0.5853634643554687, 0.5840670776367187, 0.5857935180664062, 0.584247314453125, 0.5839431762695313, 0.5854044189453125, 0.5834598388671876, 0.5832744750976563, 0.584158203125, 0.5852743530273438, 0.5853818969726563, 0.5850951538085938, 0.5850203857421875, 0.584026123046875, 0.5849088134765625, 0.5856348266601562, 0.5836636352539063, 0.5857105712890625, 0.58444287109375, 0.58351513671875, 0.5845309448242187, 
0.5851401977539062, 0.5842554931640624, 0.5832396850585938, 0.583677978515625, 0.58317822265625, 0.5831372680664062, 0.5843138427734375, 0.5847398681640625, 0.5867151489257812, 0.5837537231445312, 0.58568603515625, 0.584394775390625, 0.5833359375, 0.5846251220703125, 0.5849108276367188, 0.5853767700195313, 0.5857874145507812]",tokens/s,1.6853996704204155,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-350m,facebook/opt-350m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1312.935936,1023.934464,0.0,377.48736,290.348032,s,10,0.6976366119384766,0.06976366119384766,0.0018590846735664541,0.0696058578491211,0.07176354598999024,0.07230553245544434,0.07273912162780762,"[0.07284751892089844, 0.06800761413574219, 0.06769139099121094, 0.06779033660888673, 0.07104889678955079, 0.07164310455322266, 0.07125389099121093, 0.06824649810791016, 0.07096521759033203, 0.06814214324951172]",tokens/s,3669.5321836488733,kWh,8.134236630148721e-07,4.4571909736688417e-07,2.1182893658638067e-06,3.377432126245563e-06,tokens/kWh,75797230.09402885,MB,1312.935936,1023.934464,0.0,377.48736,337.281536,s,10,43.73100244140625,4.373100244140625,0.07142248616314475,4.400765380859375,4.4402288085937505,4.445402490234375,4.449541435546875,"[4.35173583984375, 4.25024462890625, 4.26398681640625, 4.30877587890625, 4.41468994140625, 4.4390791015625, 4.43881396484375, 4.42625927734375, 4.3868408203125, 4.450576171875]",tokens/s,14.40625562709468,kWh,5.082961926562783e-05,2.7857636841584544e-05,0.0001254377571233551,0.00020412501323056748,tokens/kWh,308634.39518232364,,s,629,44.28504171752928,0.07040547172898139,0.008395184351578173,0.07026687622070313,0.07091015625,0.07132590332031251,0.13567237548828126,"[0.0723609619140625, 0.07140863800048829, 0.07176195526123047, 0.07163388824462891, 0.07094169616699218, 0.07094783782958984, 0.07148441314697265, 0.07121202850341797, 0.07107174682617187, 0.07111577606201172, 0.07114342498779297, 0.07076761627197266, 0.07108198547363281, 0.07081574249267578, 0.07077072143554687, 0.07092835235595703, 0.07097344207763671, 0.0714598388671875, 0.07095500946044922, 0.07107481384277343, 0.07127347564697266, 0.07087923431396484, 0.07049420928955077, 0.07076659393310547, 0.07170150756835937, 0.071552001953125, 0.0690360336303711, 0.06716006469726563, 0.06747545623779297, 0.0673433609008789, 0.06707405090332032, 0.06747135925292969, 0.06733312225341796, 0.06719999694824219, 0.06728806304931641, 0.0673095703125, 0.06710784149169922, 0.06706175994873047, 0.0676126708984375, 0.06733926391601562, 0.06688256072998047, 0.06733414459228515, 0.06721126556396484, 0.0674703369140625, 0.06733926391601562, 0.06750822448730469, 0.06778880310058594, 0.06814105224609375, 0.0675225601196289, 0.06693376159667969, 0.06731980895996094, 0.06751334381103516, 0.07044198608398437, 0.0684615707397461, 0.0671098861694336, 0.06701465606689454, 0.06718463897705078, 0.06769561767578125, 0.06716422271728516, 0.06652819061279297, 0.06804582214355469, 0.06770995330810547, 
0.13573631286621093, 0.0672204818725586, 0.06833356475830078, 0.06807555389404298, 0.06771196746826172, 0.06805197143554688, 0.06715392303466797, 0.06708428955078125, 0.06746112060546874, 0.06681702423095703, 0.06697062683105469, 0.06744166564941406, 0.06732288360595703, 0.06716006469726563, 0.06725325012207031, 0.06715602874755859, 0.06704019165039063, 0.06741094207763672, 0.06709555053710937, 0.06650572967529297, 0.06745292663574219, 0.06664704132080078, 0.06701261138916016, 0.06722150421142578, 0.06713855743408204, 0.0671098861694336, 0.06744268798828125, 0.06728704071044922, 0.0696258544921875, 0.06788813018798828, 0.06791372680664062, 0.06849024200439453, 0.06736895751953124, 0.06989209747314454, 0.06821990203857421, 0.06771097564697266, 0.06738432312011719, 0.06751948547363282, 0.06726348876953125, 0.06710784149169922, 0.06749183654785157, 0.06747545623779297, 0.06864179229736328, 0.0675563507080078, 0.06683135986328125, 0.06710681915283204, 0.06729011535644532, 0.06674534606933594, 0.06728089904785156, 0.06722354888916016, 0.06724607849121093, 0.06693990325927734, 0.0674703369140625, 0.06723481750488282, 0.06712223815917968, 0.06780818939208984, 0.06794751739501953, 0.06755840301513671, 0.0675758056640625, 0.06719692993164063, 0.0672573471069336, 0.06723481750488282, 0.06736589050292968, 0.13550796508789062, 0.06692147064208985, 0.06734950256347656, 0.07442227172851562, 0.0679557113647461, 0.06727680206298828, 0.06684569549560547, 0.06775091552734375, 0.06815436553955079, 0.06903807830810547, 0.06718975830078125, 0.06693376159667969, 0.06734031677246094, 0.06811030578613281, 0.06972621154785157, 0.06792601776123047, 0.06736486053466798, 0.06915481567382813, 0.06836736297607422, 0.06893875122070313, 0.07033753967285156, 0.06752665710449218, 0.0672194595336914, 0.06725325012207031, 0.06720822143554687, 0.06680572509765625, 0.06687026977539062, 0.0672143325805664, 0.06730445098876953, 0.06715904235839844, 0.06759014129638671, 0.06702899169921875, 0.06747443389892578, 0.06825984191894531, 0.0673280029296875, 0.06705152130126953, 0.06734130859375, 0.06769664001464844, 0.06806425476074218, 0.06706380462646484, 0.06730137634277343, 0.0671825942993164, 0.0676157455444336, 0.06711398315429687, 0.06727577972412109, 0.06933606719970703, 0.06894182586669922, 0.06846259307861328, 0.06729523468017579, 0.06725836944580078, 0.06733312225341796, 0.0672511978149414, 0.0672890853881836, 0.06693376159667969, 0.0676341781616211, 0.06666957092285156, 0.06659993743896485, 0.0665722885131836, 0.0667514877319336, 0.0672368621826172, 0.06741712188720703, 0.06712726593017578, 0.06705049896240234, 0.1353912353515625, 0.0671488037109375, 0.06749900817871093, 0.0674703369140625, 0.06737715148925781, 0.0673064956665039, 0.06735257720947266, 0.06739974212646484, 0.0676075210571289, 0.07006716918945312, 0.06824960327148437, 0.06842777252197266, 0.06737100982666015, 0.06794239807128906, 0.06855276489257812, 0.0674927978515625, 0.06724813079833984, 0.06728294372558594, 0.06814617919921875, 0.06813286590576172, 0.06723583984375, 0.06725222778320313, 0.06753689575195312, 0.06841139221191406, 0.06750617980957031, 0.06819737243652343, 0.06738438415527344, 0.06882195281982421, 0.06757997131347657, 0.06839907073974609, 0.06708633422851562, 0.06791574096679688, 0.06755020904541016, 0.06742527770996094, 0.06748569488525391, 0.066587646484375, 0.06756761932373047, 0.06675154876708984, 0.06733510589599609, 0.06650163269042969, 0.06747647857666016, 0.06739250946044922, 0.06733824157714843, 0.07072870635986328, 
0.07021260833740234, 0.07095807647705078, 0.07044915008544922, 0.07039590454101563, 0.07007437133789063, 0.06753177642822265, 0.06737203216552734, 0.07054438018798828, 0.06973747253417968, 0.07040415954589843, 0.07051052856445313, 0.0710830078125, 0.0708136978149414, 0.06919782257080079, 0.06926131439208984, 0.06997299194335938, 0.07050035095214843, 0.0701286392211914, 0.07048499298095703, 0.14131712341308594, 0.06722457885742188, 0.06749183654785157, 0.06760550689697266, 0.07033446502685547, 0.07015731048583984, 0.07011225891113282, 0.06978765106201172, 0.0705269775390625, 0.06983372497558593, 0.06974156951904296, 0.07031603240966797, 0.07043583679199218, 0.0705802230834961, 0.07040614318847656, 0.07013069152832031, 0.0699504623413086, 0.07051058959960937, 0.07024332427978516, 0.06977126312255859, 0.06987673950195313, 0.07046553802490234, 0.07214806365966797, 0.07040809631347657, 0.06932179260253907, 0.07022994995117188, 0.07043071746826172, 0.07018402862548828, 0.07030262756347656, 0.07083622741699219, 0.07088333129882812, 0.06744166564941406, 0.06735155487060547, 0.06759731292724609, 0.06742733001708984, 0.0693780517578125, 0.07071334075927735, 0.07068876647949218, 0.07128985595703125, 0.07055465698242187, 0.07048291015625, 0.07026483154296875, 0.07063346862792969, 0.07024947357177734, 0.0704563217163086, 0.07028530883789062, 0.0700212173461914, 0.07084226989746094, 0.0705269775390625, 0.07041228485107422, 0.07060889434814453, 0.07039590454101563, 0.07050342559814453, 0.0704686050415039, 0.07056486511230468, 0.07071641540527343, 0.07057920074462891, 0.07043276977539062, 0.07038159942626954, 0.07048802947998047, 0.06970470428466796, 0.07084134674072265, 0.07183257293701172, 0.14264422607421876, 0.07059661102294922, 0.0704337921142578, 0.07088127899169921, 0.07030681610107421, 0.07067340850830078, 0.07019110107421875, 0.06985215759277344, 0.07026483154296875, 0.07035391998291016, 0.0703815689086914, 0.07054847717285156, 0.07035903930664063, 0.07019519805908203, 0.0714567642211914, 0.0703662109375, 0.07029964447021485, 0.07025766754150391, 0.07012351989746093, 0.07053107452392578, 0.07036518096923829, 0.07038873291015625, 0.07040204620361327, 0.07045836639404297, 0.07014093017578125, 0.07181926727294922, 0.07061196899414063, 0.07031705474853515, 0.07053209686279296, 0.07042457580566407, 0.07070719909667969, 0.07003033447265625, 0.0699525146484375, 0.07056486511230468, 0.07099187469482422, 0.07034060668945312, 0.07056281280517578, 0.07041228485107422, 0.070940673828125, 0.07032319641113281, 0.0704901123046875, 0.07044915008544922, 0.07032627105712891, 0.07004057312011719, 0.07051776123046875, 0.07030169677734376, 0.07029043579101563, 0.07063859558105469, 0.07095500946044922, 0.07047993469238281, 0.0705125732421875, 0.07045734405517579, 0.07045222473144531, 0.07037542724609375, 0.07044096374511719, 0.0705638427734375, 0.07061196899414063, 0.07037747192382812, 0.07041024017333984, 0.06980403137207031, 0.07040614318847656, 0.07026585388183594, 0.07064780426025391, 0.14216192626953125, 0.07040102386474609, 0.07040819549560547, 0.07026483154296875, 0.07061504364013672, 0.07052496337890625, 0.07054332733154296, 0.07030989074707031, 0.07011020660400391, 0.07049727630615234, 0.07108812713623047, 0.07056179046630859, 0.07050444793701172, 0.07025971221923828, 0.07032217407226563, 0.07038566589355469, 0.07023513793945313, 0.07034368133544922, 0.07049625396728515, 0.07012556457519531, 0.07023721313476562, 0.06932579040527344, 0.07020543670654297, 0.07085977935791016, 0.07015219116210937, 
0.07016242980957031, 0.07007129669189453, 0.07072870635986328, 0.07037542724609375, 0.07045836639404297, 0.07033446502685547, 0.07036313629150391, 0.07039180755615235, 0.07009587097167969, 0.07063346862792969, 0.0701839370727539, 0.07108812713623047, 0.07090892791748046, 0.07049727630615234, 0.07038259124755859, 0.07044403076171875, 0.0706519012451172, 0.070761474609375, 0.0694466552734375, 0.0697528305053711, 0.07026380920410157, 0.07064268493652344, 0.0705433578491211, 0.0703477783203125, 0.07133491516113281, 0.0704368667602539, 0.07131238555908204, 0.07170355224609375, 0.0705771484375, 0.07050752258300781, 0.07046451568603515, 0.07079840087890625, 0.07022892761230469, 0.07035084533691406, 0.07033344268798829, 0.0702402572631836, 0.07078604888916015, 0.07056896209716797, 0.13596263122558594, 0.06696959686279297, 0.06721331024169921, 0.06699417877197265, 0.06940467071533203, 0.07078604888916015, 0.07069593811035156, 0.07035187530517578, 0.07053107452392578, 0.07046246337890626, 0.0706344985961914, 0.07024642944335938, 0.07162467193603515, 0.07080652618408204, 0.07045529937744141, 0.07055359649658204, 0.0706519012451172, 0.07040102386474609, 0.06990335845947265, 0.07026080322265625, 0.07056377410888671, 0.07085670471191406, 0.06976921844482421, 0.07021260833740234, 0.06979583740234375, 0.07011634826660157, 0.0700426254272461, 0.07040716552734375, 0.07058329772949219, 0.0699658203125, 0.07026892852783204, 0.0703272933959961, 0.07030989074707031, 0.07069593811035156, 0.07077683258056641, 0.07038259124755859, 0.07024230194091798, 0.07033548736572266, 0.07012454223632812, 0.07299378967285156, 0.07067033386230469, 0.07059865570068359, 0.07034368133544922, 0.07035699462890625, 0.07052902221679687, 0.070687744140625, 0.06977433776855468, 0.07029862213134766, 0.0704901123046875, 0.07047270202636718, 0.07048191833496094, 0.07052799987792968, 0.0706170883178711, 0.07022182464599609, 0.0703272933959961, 0.070255615234375, 0.07052082824707032, 0.07059967803955078, 0.07068160247802735, 0.0705054702758789, 0.07004364776611328, 0.07048089599609375, 0.07136255645751953, 0.14220700073242187, 0.07030985260009766, 0.07026892852783204, 0.0702033920288086, 0.07120588684082031, 0.07035289764404297, 0.07038361358642578, 0.07045222473144531, 0.070181884765625, 0.07042969512939454, 0.07047577667236328, 0.07076150512695313, 0.07077168273925781, 0.07013990020751953, 0.07032422637939453, 0.07050752258300781, 0.07027200317382812, 0.07128268432617188, 0.070614013671875, 0.07047475433349609, 0.07060889434814453, 0.07019827270507813, 0.07051264190673828, 0.07065702056884765, 0.07134515380859376, 0.07106253051757813, 0.07054950714111329, 0.07042253112792969, 0.07038873291015625, 0.0702740478515625, 0.07026687622070313, 0.06861004638671875, 0.0671272964477539, 0.06711507415771484, 0.0671866226196289, 0.06729523468017579, 0.06710169219970703, 0.06714166259765625, 0.06752764892578125, 0.06734130859375, 0.06720921325683593, 0.06747135925292969, 0.06729933166503907, 0.0674150390625, 0.06712422180175781, 0.06744882965087891, 0.06737612915039062, 0.06713958740234376, 0.06726656341552735, 0.0685823974609375, 0.07029043579101563, 0.07070515441894532, 0.0720742416381836, 0.07064166259765625, 0.07065087890625, 0.07042559814453125, 0.07116806030273437, 0.07063648223876953, 0.07048703765869141, 0.07075430297851562, 0.07056896209716797, 0.07044608306884766, 0.07033036804199219, 0.13839053344726562, 0.07068978881835937, 0.07101030731201172, 0.07077279663085938, 0.07043782043457031, 0.070614013671875, 0.07037542724609375, 
0.07067443084716797, 0.070181884765625, 0.07043788909912109, 0.07035391998291016, 0.0702003173828125, 0.07099903869628907, 0.07056690979003906, 0.07074406433105469, 0.07023308563232422, 0.07023616027832032, 0.0702208023071289, 0.070150146484375, 0.0707799072265625, 0.07061199951171875, 0.0703927993774414, 0.07016448211669922, 0.07032115173339844, 0.07010406494140625, 0.07089151763916016, 0.07061913299560547, 0.07181926727294922, 0.07085977935791016, 0.07089663696289063, 0.07025459289550781, 0.07071952056884766, 0.07063139343261719, 0.07043276977539062, 0.07035391998291016, 0.07057920074462891, 0.07042867279052735, 0.07073075103759766, 0.07066726684570312, 0.07058124542236328, 0.07049215698242188, 0.07038668823242188, 0.07035903930664063, 0.07047987365722656, 0.07088025665283203, 0.07053414154052734, 0.07050342559814453, 0.07065395355224609, 0.07077375793457032, 0.07083213043212891, 0.070761474609375, 0.07080242919921875, 0.07068364715576173, 0.0717496337890625, 0.07135539245605468, 0.07119155120849609, 0.07112703704833985, 0.07091506958007812, 0.07129190063476562, 0.07068876647949218, 0.07067545318603516, 0.07053619384765625, 0.07090688323974609]",tokens/s,14.20344151445213,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-rw-1b,tiiuae/falcon-rw-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File 
""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: FalconForCausalLM does not support Flash Attention 2.0 yet. Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpfnhjd17i/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-125m,facebook/opt-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1207.41888,879.230976,0.0,232.783872,169.719808,s,10,0.3329092750549316,0.03329092750549316,0.001090057705435725,0.0329870719909668,0.033882015228271486,0.0350985279083252,0.036071738052368164,"[0.03631504058837891, 0.03281526565551758, 0.03245974349975586, 0.03361167907714844, 0.03344601440429688, 0.033440895080566406, 0.032671966552734376, 0.033158878326416015, 0.03245904159545898, 0.0325307502746582]",tokens/s,7689.782748100329,kWh,3.9167167718924527e-07,2.1461758953218489e-07,8.304311279209363e-07,1.4367203946423665e-06,tokens/kWh,178183591.57052574,MB,1207.41888,879.230976,0.0,232.783872,199.792128,s,10,20.506330566406252,2.050633056640625,0.01795433438952847,2.0542513427734375,2.07115517578125,2.0720841796875,2.0728273828125,"[2.05074072265625, 2.0363714599609377, 2.0684541015625, 2.054240966796875, 2.07094873046875, 2.07301318359375, 2.05426171875, 2.0559150390625, 2.0203577880859376, 2.02202685546875]",tokens/s,30.72222004613905,kWh,2.4533342500768937e-05,1.3444361220557862e-05,5.009960961606961e-05,8.80773133373964e-05,tokens/kWh,715280.6734540922,,s,629,20.768719875335666,0.03301863255220301,0.003999927492502233,0.03273830413818359,0.033139096832275394,0.033337548065185546,0.06495629302978514,"[0.033740798950195314, 0.034253822326660154, 0.0341104621887207, 0.03412275314331055, 0.0335206413269043, 0.03323699188232422, 0.03351859283447266, 0.03322982406616211, 0.033159168243408206, 0.03280998229980469, 0.03333222579956055, 0.033258495330810545, 0.032756736755371094, 0.03297075271606445, 0.03364352035522461, 0.03362918472290039, 0.033584129333496096, 0.03300249481201172, 0.03202867126464844, 0.031971328735351565, 0.031821823120117186, 0.031848447799682614, 0.03192934417724609, 0.03202560043334961, 0.03206041717529297, 0.032089088439941404, 0.03197644805908203, 0.03203379058837891, 0.03201638412475586, 0.031941631317138675, 0.03214438247680664, 0.03217407989501953, 0.03201638412475586, 0.031987712860107424, 0.03198054313659668, 0.03194572830200195, 0.03189145660400391, 0.03199590492248535, 0.03189043235778809, 0.03177676773071289, 0.032366592407226565, 0.03329228973388672, 0.0331960334777832, 0.0321341438293457, 0.032966655731201173, 0.03283865737915039, 0.03283456039428711, 0.032866302490234374, 0.032574462890625, 0.03194470405578613, 0.032132095336914065, 0.032039936065673826, 
0.03172454452514648, 0.03170099258422852, 0.03186278343200684, 0.03212492752075195, 0.031920127868652344, 0.032054271697998044, 0.03213926315307617, 0.03209830474853516, 0.03199795150756836, 0.03198873519897461, 0.06604799652099609, 0.031955968856811526, 0.03163852882385254, 0.03178700828552246, 0.03185971260070801, 0.03197337532043457, 0.03201945495605469, 0.031987712860107424, 0.032059391021728514, 0.03199795150756836, 0.03196211242675781, 0.03191500854492187, 0.03182694435119629, 0.031764480590820314, 0.031714303970336914, 0.0317573127746582, 0.031927295684814457, 0.03189145660400391, 0.03197235107421875, 0.03200921630859375, 0.03191500854492187, 0.03215462493896484, 0.03199795150756836, 0.03201126480102539, 0.031936511993408204, 0.03203071975708008, 0.03208294296264649, 0.031936511993408204, 0.032161792755126956, 0.03201331329345703, 0.031562751770019534, 0.03183923149108887, 0.0325928955078125, 0.03195187187194824, 0.03276287841796875, 0.03203276824951172, 0.03358924865722656, 0.032881664276123046, 0.033119232177734374, 0.03294617462158203, 0.03283251190185547, 0.03209011077880859, 0.032072704315185545, 0.03290934371948242, 0.03280073547363281, 0.032763904571533206, 0.03282329559326172, 0.03290419387817383, 0.032740352630615234, 0.032478206634521486, 0.03298611068725586, 0.03275059127807617, 0.033102848052978515, 0.03289395141601562, 0.03265331268310547, 0.032115745544433597, 0.03197641563415527, 0.03259494400024414, 0.03297894287109375, 0.03300044631958008, 0.03283865737915039, 0.03288780975341797, 0.03242803192138672, 0.06496460723876953, 0.03309568023681641, 0.03194470405578613, 0.03273830413818359, 0.03288678359985352, 0.03281919860839844, 0.03297689437866211, 0.032912384033203124, 0.032787487030029296, 0.033037311553955076, 0.03294307327270508, 0.032863231658935545, 0.03278847885131836, 0.03306496047973633, 0.032745471954345705, 0.03307212829589844, 0.03291852951049805, 0.03285094451904297, 0.03298406219482422, 0.03316223907470703, 0.03301683044433594, 0.0328611831665039, 0.03299532699584961, 0.03296051025390625, 0.03287449645996094, 0.0329615364074707, 0.03278745651245117, 0.032949249267578126, 0.033023998260498046, 0.03287449645996094, 0.032909313201904294, 0.03280179214477539, 0.0328089599609375, 0.03198464012145996, 0.032054271697998044, 0.03240140914916992, 0.03320729446411133, 0.033165313720703124, 0.03290726470947265, 0.032584705352783204, 0.032707584381103515, 0.032249855041503905, 0.03290521621704102, 0.0328458251953125, 0.032791553497314455, 0.03254579162597656, 0.03258879852294922, 0.03239424133300781, 0.03282227325439453, 0.032894977569580076, 0.03285811233520508, 0.03289708709716797, 0.032930816650390625, 0.03297683334350586, 0.033140735626220705, 0.032868350982666016, 0.03292364883422851, 0.032985088348388675, 0.033018878936767575, 0.0329431037902832, 0.032927745819091796, 0.032976993560791014, 0.03305363082885742, 0.06707913970947266, 0.03323494338989258, 0.03279257583618164, 0.033137664794921876, 0.0331776008605957, 0.03303014373779297, 0.033274879455566404, 0.0330967025756836, 0.033137664794921876, 0.03282534408569336, 0.03309056091308594, 0.033081344604492184, 0.03303424072265625, 0.033309696197509765, 0.03319500732421875, 0.03301171112060547, 0.033345535278320314, 0.03285094451904297, 0.03292876815795898, 0.03282124710083008, 0.03200307083129883, 0.0319866886138916, 0.032074752807617186, 0.031974399566650394, 0.03197644805908203, 0.03283967971801758, 0.03339263916015625, 0.033058815002441407, 0.032753662109375, 0.03252326583862305, 0.03210553741455078, 
0.031921152114868165, 0.03210540771484375, 0.0323768310546875, 0.03219968032836914, 0.0321976318359375, 0.03212799835205078, 0.03213516616821289, 0.03212799835205078, 0.03211775970458984, 0.032985088348388675, 0.034108417510986325, 0.033040382385253905, 0.03294617462158203, 0.03294105529785156, 0.032906238555908206, 0.032069633483886716, 0.031744064331054686, 0.03175724792480469, 0.031899648666381834, 0.03206655883789063, 0.03191296005249023, 0.03192934417724609, 0.032126976013183595, 0.03269740676879883, 0.03293075180053711, 0.03260313415527344, 0.03196211242675781, 0.031904767990112305, 0.03212287902832031, 0.03203788757324219, 0.03282944107055664, 0.03291033554077148, 0.06675456237792969, 0.033050624847412106, 0.0328007698059082, 0.03285299301147461, 0.0328089599609375, 0.03288883209228516, 0.033067008972167966, 0.03282022476196289, 0.03274649429321289, 0.03173785591125488, 0.032020481109619144, 0.03192428779602051, 0.03253753662109375, 0.03285606384277344, 0.033326080322265625, 0.03281919860839844, 0.033073150634765625, 0.03273830413818359, 0.03295129776000977, 0.03292364883422851, 0.03303628921508789, 0.03282841491699219, 0.032919551849365236, 0.03291961669921875, 0.032923583984375, 0.032797695159912106, 0.03295948791503906, 0.03298303985595703, 0.032982078552246094, 0.03352467346191406, 0.032589824676513675, 0.033331199645996096, 0.03296255874633789, 0.032672767639160154, 0.032740352630615234, 0.03300864028930664, 0.03226521682739258, 0.03242803192138672, 0.03284275054931641, 0.03410636901855469, 0.03410943984985351, 0.032530433654785154, 0.032048126220703126, 0.032909313201904294, 0.03273625564575195, 0.03282022476196289, 0.033040382385253905, 0.03312844848632813, 0.033032222747802736, 0.03294512176513672, 0.032824321746826174, 0.032772159576416014, 0.032817089080810546, 0.03304959869384766, 0.032717823028564456, 0.0328007698059082, 0.03302195358276367, 0.03298713684082031, 0.03286937713623047, 0.032884735107421875, 0.032917503356933595, 0.03312947082519531, 0.0328243522644043, 0.06682825469970703, 0.032939006805419925, 0.03280998229980469, 0.0324136962890625, 0.032930816650390625, 0.03313868713378906, 0.03297382354736328, 0.032863231658935545, 0.03299327850341797, 0.03307212829589844, 0.03317145538330078, 0.033142784118652346, 0.032942081451416014, 0.033363967895507815, 0.03327897644042969, 0.03276595306396484, 0.03296255874633789, 0.033056766510009765, 0.03283967971801758, 0.03291648101806641, 0.03299225616455078, 0.032946239471435546, 0.032924606323242185, 0.03313868713378906, 0.03290521621704102, 0.03295641708374023, 0.03341516876220703, 0.0333199348449707, 0.03459379196166992, 0.03333631896972656, 0.032895999908447264, 0.03294412612915039, 0.03291648101806641, 0.03218227386474609, 0.032626686096191404, 0.0323061752319336, 0.03225804901123047, 0.03283148956298828, 0.033329151153564454, 0.03292879867553711, 0.03304956817626953, 0.03294822311401367, 0.03261030578613281, 0.03294105529785156, 0.032903167724609376, 0.03311206436157227, 0.03312025451660156, 0.03289190292358399, 0.03233280181884766, 0.032524288177490236, 0.03305779266357422, 0.03231129455566406, 0.0324505615234375, 0.03278950500488281, 0.03302707290649414, 0.03288678359985352, 0.032996353149414064, 0.03286937713623047, 0.03303014373779297, 0.03281203079223633, 0.03252633666992188, 0.032178176879882815, 0.03199084854125977, 0.065176513671875, 0.03205017471313477, 0.03209011077880859, 0.03212287902832031, 0.03207987213134766, 0.03216588973999023, 0.03170816040039062, 0.03165286445617676, 0.03220684814453125, 
0.03201228713989258, 0.03200614547729492, 0.03260927963256836, 0.03219046401977539, 0.03249868774414062, 0.03227340698242188, 0.03234201431274414, 0.03262464141845703, 0.03326873779296875, 0.03293286514282227, 0.032851966857910156, 0.032830463409423825, 0.03336908721923828, 0.03311824035644531, 0.03295331192016602, 0.032176128387451174, 0.03207680130004883, 0.032168991088867185, 0.032793567657470706, 0.03295641708374023, 0.032803871154785155, 0.03245257568359375, 0.03248025512695313, 0.03281612777709961, 0.03295129776000977, 0.033175552368164066, 0.03283359909057617, 0.03288671875, 0.03277721786499024, 0.033051647186279294, 0.033258495330810545, 0.03284275054931641, 0.03292364883422851, 0.03303014373779297, 0.03285504150390625, 0.03174399948120117, 0.03240857696533203, 0.033037311553955076, 0.03285299301147461, 0.032894977569580076, 0.032917503356933595, 0.0321341438293457, 0.03308031845092774, 0.032146430969238284, 0.03266559982299805, 0.03310899353027344, 0.032740352630615234, 0.03243212890625, 0.03290828704833984, 0.032982017517089846, 0.032979969024658204, 0.032942081451416014, 0.032345088958740234, 0.03220172882080078, 0.06660710144042968, 0.032917537689208985, 0.032905185699462894, 0.032873470306396486, 0.033800193786621094, 0.0329615364074707, 0.03261542510986328, 0.03257753753662109, 0.0329881591796875, 0.03291545486450195, 0.032912384033203124, 0.032797695159912106, 0.03293183898925781, 0.033181697845458984, 0.03299020767211914, 0.03279564666748047, 0.03290726470947265, 0.03288883209228516, 0.03297177505493164, 0.03243929672241211, 0.032846847534179685, 0.03225600051879883, 0.03281510543823242, 0.033083393096923826, 0.0330332145690918, 0.032736320495605466, 0.032817089080810546, 0.03186278343200684, 0.03185766410827637, 0.03318483352661133, 0.03217606353759766, 0.03176038360595703, 0.032323585510253904, 0.03279052734375, 0.0322949104309082, 0.032471038818359374, 0.03288780975341797, 0.03274140930175781, 0.03304035186767578, 0.03282841491699219, 0.031971328735351565, 0.03230003356933594, 0.03287551879882813, 0.032753662109375, 0.03289907073974609, 0.03249356842041016, 0.03194367980957031, 0.0321710090637207, 0.03300352096557617, 0.03274649429321289, 0.0328540153503418, 0.03268710327148437, 0.03202764892578125, 0.032950271606445314, 0.03294515228271484, 0.03292671966552734, 0.03265740966796875, 0.03194675254821777, 0.03184127998352051, 0.03206860733032227, 0.03196108818054199, 0.03163750457763672, 0.031665151596069335, 0.0648058853149414, 0.03185663986206055, 0.031904767990112305, 0.031893503189086916, 0.032007167816162106, 0.0315729923248291, 0.031927295684814457, 0.03198259162902832, 0.0319682559967041, 0.03189967918395996, 0.03205014419555664, 0.031955968856811526, 0.03200614547729492, 0.03189657592773437, 0.03188121604919433, 0.03202150344848633, 0.0321638412475586, 0.031991840362548825, 0.03199894332885742, 0.03193343925476074, 0.03197542381286621, 0.0318525447845459, 0.03199078369140625, 0.03202252960205078, 0.03216281509399414, 0.03228876876831055, 0.032105472564697264, 0.0329697265625, 0.03331584167480469, 0.031987712860107424, 0.03201638412475586, 0.03187820816040039, 0.03171219253540039, 0.032074752807617186, 0.03211372756958008, 0.03188115119934082, 0.031936511993408204, 0.032328704833984374, 0.032024574279785153, 0.03211980819702148, 0.031898624420166014, 0.03198566436767578, 0.03184332847595215, 0.032132095336914065, 0.031905792236328126, 0.031921152114868165, 0.03194777679443359, 0.03189145660400391, 0.03162112045288086, 0.03145011138916016, 0.03202867126464844, 
0.03241164779663086, 0.03273932647705078, 0.03275980758666992, 0.0327086067199707, 0.032747520446777346, 0.032508926391601564, 0.031954944610595705, 0.03173785591125488, 0.03213926315307617, 0.03191910362243652, 0.032194561004638675, 0.03189452743530274, 0.0649349136352539, 0.032023551940917966, 0.03198566436767578, 0.03202560043334961, 0.03201126480102539, 0.03183616065979004, 0.031854591369628905, 0.0317573127746582, 0.031936511993408204, 0.031903743743896484, 0.03194675254821777, 0.03200511932373047, 0.031974399566650394, 0.03211161422729492, 0.031942655563354495, 0.0321638412475586, 0.03193446350097656, 0.03189145660400391, 0.03212799835205078, 0.032, 0.03198361587524414, 0.031937536239624024, 0.032105472564697264, 0.03191193580627441, 0.031916032791137694, 0.03271680068969727, 0.0333383674621582, 0.03406950378417969, 0.03317657470703125, 0.03253247833251953, 0.03198975944519043, 0.03277926254272461, 0.03240140914916992, 0.03290521621704102, 0.03309465789794922, 0.032075775146484374, 0.03243110275268555, 0.03205017471313477, 0.03197747230529785, 0.03197235107421875, 0.031908863067626955, 0.03199283218383789, 0.03201126480102539, 0.03172659111022949, 0.03145113563537598, 0.031459327697753905, 0.031643648147583005, 0.03180441665649414, 0.032189441680908204, 0.031916032791137694, 0.031955968856811526, 0.031971328735351565, 0.03196723175048828, 0.03189657592773437, 0.031959039688110355, 0.03171327972412109, 0.031732736587524416, 0.03177369689941406, 0.032056320190429685, 0.03193343925476074, 0.03195084762573242, 0.03191910362243652, 0.03195084762573242]",tokens/s,30.28593017651422,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-4.5B,facebook/xglm-4.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: XGLMForCausalLM does not support Flash Attention 2.0 yet. Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmp_boz25wy/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,x,x,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/x/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in 
_get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3219-40ab0199517ffe2375162645;b985e9f5-d6f3-4ca5-a7f1-f638dfc22d74) Repository Not Found for url: https://huggingface.co/x/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: x is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-3b-4e1t,stabilityai/stablelm-3b-4e1t,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.1,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2112.14336,2911.371264,0.0,2264.92416,2140.72832,s,10,2.4878771514892577,0.24878771514892578,0.001481785323819343,0.24867384338378906,0.25056690216064453,0.25108961105346683,0.2515077781677246,"[0.2516123199462891, 0.25045074462890626, 0.24760211181640626, 0.24728652954101563, 0.24780889892578126, 0.24664930725097656, 0.2480397186279297, 0.24930796813964845, 0.24954534912109375, 0.24957420349121093]",tokens/s,1028.9897145715452,kWh,2.9131745419851165e-06,1.5962883600656e-06,1.3288858869561026e-05,1.7798321771611744e-05,tokens/kWh,14383378.572710099,MB,2112.14336,2911.371264,0.0,2264.92416,2246.907904,s,10,146.0569638671875,14.605696386718751,0.006079082022590375,14.60555517578125,14.610718359375001,14.61447392578125,14.617478378906249,"[14.5972685546875, 14.605400390625, 14.6057099609375, 14.6049326171875, 14.5980126953125, 14.599310546875, 14.6182294921875, 14.609376953125, 14.6088388671875, 14.6098837890625]",tokens/s,4.313385567653395,kWh,0.00017247067014820533,9.452784657802511e-05,0.0007735646039460372,0.0010405631206722677,tokens/kWh,60544.14071421071,,s,629,148.0586790771484,0.23538740711788306,0.029533502655254593,0.23172300720214845,0.23236076049804688,0.2326812683105469,0.4793023583984375,"[0.23325080871582032, 0.23149772644042968, 0.23179673767089845, 0.2320568389892578, 0.2313912353515625, 0.23193907165527344, 0.23138099670410156, 0.23167181396484374, 0.2314608612060547, 0.2322554931640625, 0.23149363708496093, 0.23134413146972657, 0.23120999145507812, 0.23181106567382812, 0.23132569885253906, 0.23138099670410156, 0.2312243194580078, 0.23141888427734375, 0.23158682250976562, 0.23283609008789063, 0.2315028533935547, 0.23156224060058594, 0.2313502655029297, 0.23152435302734375, 0.23198617553710937, 0.23152435302734375, 0.23150592041015625, 0.23154483032226564, 0.23148646545410156, 0.2315581512451172, 0.23163084411621093, 0.23152537536621093, 0.23172300720214845, 0.23159397888183594, 0.23167079162597656, 0.23191448974609374, 0.2319974365234375, 0.2318376922607422, 0.23134413146972657, 0.23214079284667968, 0.23153254699707032, 0.231510009765625, 0.23141171264648439, 0.23148544311523436, 0.23137586975097657, 0.23179263305664063, 0.23232511901855468, 0.23154483032226564, 0.23182234191894532, 0.23154483032226564, 0.23155506896972655, 0.23143218994140624, 0.23180184936523437, 0.23193702697753907, 0.2313113555908203, 0.23155711364746093, 0.23150489807128907, 0.23152333068847655, 0.231404541015625, 0.23146803283691406, 0.2314721221923828, 0.23182131958007812, 0.48205722045898436, 0.23162675476074218, 0.23151513671875, 0.23147418212890625, 0.23139430236816405, 0.23154176330566406, 0.2315345916748047, 0.231478271484375, 0.2315161590576172, 0.2314055633544922, 0.23177113342285155, 0.23148748779296874, 0.23155506896972655, 0.23154995727539063, 0.23152435302734375, 0.23150796508789062, 
0.23173222351074219, 0.23166157531738282, 0.23167897033691406, 0.23310336303710938, 0.23195033264160156, 0.23181925964355468, 0.23237017822265624, 0.2315888671875, 0.23168307495117188, 0.23146905517578126, 0.23271218872070312, 0.23225343322753905, 0.2316738586425781, 0.23232205200195313, 0.23157145690917968, 0.23149465942382813, 0.23150489807128907, 0.23166873168945312, 0.23170457458496094, 0.23163084411621093, 0.23201075744628907, 0.23196978759765624, 0.23187762451171876, 0.23154893493652343, 0.23200358581542968, 0.23128781127929687, 0.2326876220703125, 0.23151513671875, 0.2320189514160156, 0.23194111633300782, 0.2343126983642578, 0.23182745361328125, 0.23166975402832032, 0.2321029052734375, 0.2315478973388672, 0.23180390930175782, 0.23187455749511718, 0.23265895080566407, 0.23204351806640625, 0.23161549377441407, 0.23184384155273438, 0.23168307495117188, 0.23198822021484375, 0.23221965026855468, 0.23208038330078126, 0.2315028533935547, 0.2317189178466797, 0.4791541748046875, 0.23152537536621093, 0.23141786193847655, 0.23147109985351563, 0.2316881866455078, 0.23219097900390626, 0.23250738525390624, 0.23175474548339844, 0.23157862854003905, 0.23146905517578126, 0.23150796508789062, 0.23139942932128907, 0.23163084411621093, 0.23131954956054687, 0.23146290588378907, 0.2314721221923828, 0.23149977111816405, 0.23143116760253907, 0.23153561401367187, 0.23136460876464843, 0.23145574951171874, 0.23262821960449218, 0.23162060546875, 0.23222067260742188, 0.2317332458496094, 0.2321459197998047, 0.23204658508300782, 0.23222169494628905, 0.23193394470214843, 0.23173222351074219, 0.23207321166992187, 0.2314967041015625, 0.23171379089355468, 0.23195033264160156, 0.2315335693359375, 0.23165440368652343, 0.23151513671875, 0.2319278106689453, 0.23251968383789062, 0.2320762939453125, 0.2323722229003906, 0.23198104858398438, 0.23364813232421874, 0.231699462890625, 0.23159397888183594, 0.2315478973388672, 0.23241932678222657, 0.2327490539550781, 0.2325544891357422, 0.23194828796386718, 0.2318673858642578, 0.2316810302734375, 0.23178445434570313, 0.23171685791015625, 0.2330030059814453, 0.23198924255371095, 0.23177932739257812, 0.2315345916748047, 0.2316759033203125, 0.23150898742675782, 0.23172915649414064, 0.23147622680664062, 0.23150898742675782, 0.4793599853515625, 0.23149055480957031, 0.23156224060058594, 0.23211724853515625, 0.23157350158691406, 0.2316636199951172, 0.2315335693359375, 0.2317004852294922, 0.23198104858398438, 0.2317076416015625, 0.2317066192626953, 0.2316810302734375, 0.23214694213867187, 0.2314281005859375, 0.23171379089355468, 0.23145368957519533, 0.23175474548339844, 0.2320343017578125, 0.23201075744628907, 0.2313799743652344, 0.23178445434570313, 0.2324234313964844, 0.2315724792480469, 0.23148133850097657, 0.23172813415527344, 0.23166053771972656, 0.23147314453125, 0.2317332458496094, 0.2317066192626953, 0.23179878234863283, 0.23192472839355469, 0.231546875, 0.23191448974609374, 0.2325841979980469, 0.232310791015625, 0.23168716430664063, 0.23217356872558595, 0.23178752136230468, 0.23181004333496094, 0.23151922607421874, 0.23181517028808593, 0.23169024658203125, 0.23222067260742188, 0.23224114990234376, 0.233280517578125, 0.23156326293945312, 0.23260774230957032, 0.23164927673339844, 0.23165132141113282, 0.23178752136230468, 0.2314833984375, 0.23191654968261718, 0.23174041748046875, 0.23182847595214845, 0.23188172912597657, 0.23205477905273436, 0.23170252990722656, 0.23150592041015625, 0.23171072387695313, 0.23180697631835936, 0.23188787841796876, 0.23198924255371095, 
0.2316083221435547, 0.48040447998046876, 0.2314649658203125, 0.23149465942382813, 0.23155506896972655, 0.23165338134765626, 0.23153050231933595, 0.23216741943359376, 0.23149977111816405, 0.2316738586425781, 0.2315141143798828, 0.23169229125976562, 0.23125503540039063, 0.23212237548828124, 0.231546875, 0.23149363708496093, 0.2313123779296875, 0.2316011505126953, 0.23193292236328125, 0.23160934448242188, 0.23140045166015624, 0.23142399597167967, 0.2315028533935547, 0.23264256286621093, 0.23136256408691405, 0.23141273498535156, 0.23175270080566407, 0.2318970947265625, 0.23143218994140624, 0.231625732421875, 0.23146803283691406, 0.23153561401367187, 0.23179263305664063, 0.23162265014648437, 0.23143833923339843, 0.23150489807128907, 0.2316769256591797, 0.2315028533935547, 0.23140351867675782, 0.23153254699707032, 0.23176396179199218, 0.23235379028320313, 0.23157145690917968, 0.23215000915527345, 0.23228416442871094, 0.23199436950683594, 0.2316083221435547, 0.2318725128173828, 0.2318663635253906, 0.2320168914794922, 0.2316748809814453, 0.23151718139648436, 0.2315161590576172, 0.23198104858398438, 0.23176908874511717, 0.2314844207763672, 0.23157452392578126, 0.2316400604248047, 0.2338518981933594, 0.23160319519042968, 0.2314608612060547, 0.23169842529296875, 0.23176499938964842, 0.23139634704589843, 0.47871487426757814, 0.23285452270507812, 0.2316759033203125, 0.23147109985351563, 0.2317619171142578, 0.23147418212890625, 0.23200460815429688, 0.23145368957519533, 0.23143423461914062, 0.23151513671875, 0.23195852661132813, 0.23152946472167968, 0.23166259765625, 0.2315407409667969, 0.231804931640625, 0.23130111694335936, 0.23147418212890625, 0.23145368957519533, 0.2317332458496094, 0.23229338073730468, 0.23143423461914062, 0.23145779418945311, 0.2318243865966797, 0.23271629333496094, 0.23164723205566407, 0.23130624389648438, 0.23173426818847656, 0.23160525512695312, 0.23191448974609374, 0.23143936157226563, 0.23173735046386718, 0.2313297882080078, 0.23191654968261718, 0.23161753845214844, 0.23174861145019532, 0.23163494873046875, 0.23162367248535157, 0.23179673767089845, 0.23365119934082032, 0.2315284423828125, 0.23203532409667968, 0.2314844207763672, 0.23151820373535156, 0.23153254699707032, 0.23219815063476562, 0.23247462463378907, 0.23148236083984375, 0.2316247100830078, 0.23154893493652343, 0.23167999267578124, 0.23152333068847655, 0.2313492431640625, 0.23187149047851563, 0.2317332458496094, 0.23147314453125, 0.23146803283691406, 0.231546875, 0.23138099670410156, 0.23195443725585937, 0.23131033325195313, 0.2316083221435547, 0.23169331359863282, 0.23267942810058595, 0.4799190979003906, 0.23168614196777343, 0.2323384246826172, 0.23134413146972657, 0.2315478973388672, 0.23153152465820312, 0.23206399536132813, 0.23240089416503906, 0.2316400604248047, 0.232195068359375, 0.23177113342285155, 0.23198208618164062, 0.231762939453125, 0.23141580200195314, 0.23243571472167968, 0.23172607421875, 0.23175167846679687, 0.23154380798339844, 0.23196159362792967, 0.2322186279296875, 0.23200767517089843, 0.23159706115722656, 0.23196774291992187, 0.23436288452148438, 0.23299481201171876, 0.23171994018554687, 0.23219917297363282, 0.23193702697753907, 0.23206809997558595, 0.23197900390625, 0.232015869140625, 0.23205580139160156, 0.2321817626953125, 0.23225856018066407, 0.23298252868652344, 0.23273779296875, 0.23192576599121092, 0.23256166076660156, 0.23193702697753907, 0.23206912231445312, 0.23249714660644533, 0.23228518676757812, 0.2316943359375, 0.23190733337402344, 0.2321776580810547, 0.23329791259765625, 
0.2317434844970703, 0.23172300720214845, 0.23201791381835937, 0.23177830505371094, 0.23229849243164064, 0.23173631286621094, 0.23190631103515624, 0.23168716430664063, 0.23162879943847656, 0.2316021728515625, 0.23181619262695313, 0.23235891723632812, 0.23165338134765626, 0.23182540893554687, 0.23188890075683594, 0.23179878234863283, 0.23223193359375, 0.4815288391113281, 0.23210905456542968, 0.23236915588378906, 0.23213363647460938, 0.23194931030273438, 0.2322708435058594, 0.2319656982421875, 0.23167999267578124, 0.23199026489257812, 0.23194419860839843, 0.23190220642089843, 0.23164620971679686, 0.23152333068847655, 0.23174656677246094, 0.23165132141113282, 0.23153868103027345, 0.23241421508789062, 0.23152333068847655, 0.23160012817382813, 0.2317813720703125, 0.23216844177246093, 0.23163699340820312, 0.2317332458496094, 0.23213157653808594, 0.23221554565429686, 0.23165235900878905, 0.23165029907226561, 0.2321817626953125, 0.23166566467285157, 0.23215718078613282, 0.23198719787597658, 0.23195852661132813, 0.2326824951171875, 0.23238552856445313, 0.23246131896972655, 0.2325893096923828, 0.23215411376953124, 0.23172505187988282, 0.23226573181152343, 0.2317496337890625, 0.23191552734375, 0.23176908874511717, 0.23195545959472658, 0.2318008270263672, 0.23182643127441407, 0.23190016174316405, 0.232416259765625, 0.2320189514160156, 0.23202508544921874, 0.23167999267578124, 0.23148748779296874, 0.23161036682128905, 0.2314598388671875, 0.23159500122070312, 0.23170457458496094, 0.23154278564453126, 0.23169024658203125, 0.23182643127441407, 0.23170355224609376, 0.2315704345703125, 0.23159603881835938, 0.23155404663085938, 0.23165338134765626, 0.4812718200683594, 0.23177317810058592, 0.23226675415039064, 0.23194111633300782, 0.2323927001953125, 0.23203225708007813, 0.2321274871826172, 0.23159910583496093, 0.2321274871826172, 0.23236813354492186, 0.23202099609375, 0.23199026489257812, 0.2322821044921875, 0.23166873168945312, 0.2315704345703125, 0.2316953582763672, 0.2317066192626953, 0.23233638000488283, 0.23172402954101562, 0.23153765869140625, 0.2317015075683594, 0.23190835571289062, 0.2319349822998047, 0.23167079162597656, 0.23204454040527345, 0.23182949829101562, 0.2317066192626953, 0.23161241149902342, 0.23168716430664063, 0.23161651611328124, 0.2314915771484375, 0.23156736755371093, 0.23175578308105468, 0.2314915771484375, 0.2315898895263672, 0.2314639434814453, 0.23174041748046875, 0.23146188354492186, 0.23163699340820312, 0.231689208984375, 0.23188275146484374, 0.23198002624511718, 0.23282687377929687, 0.23209779357910157, 0.23285350036621094, 0.23174656677246094, 0.23175578308105468, 0.23238450622558593, 0.2317015075683594, 0.23150489807128907, 0.23172300720214845, 0.231625732421875, 0.23256166076660156, 0.23199641418457032, 0.232195068359375, 0.23204556274414062, 0.23192166137695314, 0.2320394287109375, 0.23225138854980468, 0.23171379089355468, 0.2317813720703125, 0.23161958312988282, 0.232158203125, 0.4819783630371094, 0.2318551025390625, 0.23196159362792967, 0.23218585205078124, 0.2324418487548828, 0.23145881652832032, 0.2316441650390625, 0.2316216278076172, 0.2316216278076172, 0.23146598815917968, 0.23160525512695312, 0.2316451873779297, 0.23194111633300782, 0.23152639770507813, 0.23173939514160155, 0.23201075744628907, 0.23212953186035157, 0.23183258056640624, 0.23173837280273438, 0.2314915771484375, 0.2315274200439453, 0.23239474487304687, 0.23221554565429686, 0.23157145690917968, 0.23228518676757812, 0.23169842529296875, 0.23386726379394532, 0.23193087768554688, 0.23240908813476563, 
0.23179161071777343, 0.2318612518310547, 0.23229029846191407, 0.2322391052246094, 0.2321817626953125, 0.2317076416015625, 0.23165338134765626, 0.231762939453125, 0.23216537475585938, 0.23200869750976563, 0.23169024658203125, 0.2321448974609375, 0.23209881591796874, 0.23225343322753905, 0.2321080322265625, 0.2319605712890625, 0.23185305786132812, 0.23235276794433593, 0.23230157470703125, 0.23171379089355468, 0.23146905517578126, 0.2321694793701172, 0.2319288330078125, 0.231699462890625, 0.23157760620117188, 0.23183871459960936, 0.23164723205566407, 0.23158476257324218, 0.2316769256591797, 0.2315581512451172, 0.23161241149902342, 0.23160627746582033, 0.23157452392578126, 0.23163392639160157]",tokens/s,4.248315626753965,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-2-1_6b,stabilityai/stablelm-2-1_6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.1,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1452.249088,2449.997824,0.0,1803.55072,1664.521216,s,10,1.3683243865966794,0.13683243865966793,0.001337233378547869,0.13643169403076172,0.13770939025878906,0.13902387695312501,0.14007546630859374,"[0.14033836364746094, 0.13655836486816406, 0.13525657653808593, 0.13630502319335938, 0.13605914306640626, 0.13580105590820313, 0.13619818115234375, 0.13698959350585938, 0.1374172821044922, 0.1374008026123047]",tokens/s,1870.9013922987058,kWh,1.6058788441859926e-06,8.799470783644112e-07,6.656266586270105e-06,9.14209250882051e-06,tokens/kWh,28002341.887593575,MB,1453.084672,2449.997824,0.0,1803.55072,1763.593728,s,10,81.66558447265625,8.166558447265626,0.005684297838363884,8.165376708984375,8.17551572265625,8.17556669921875,8.17560748046875,"[8.17105712890625, 8.1615234375, 8.15890625, 8.16109033203125, 8.165703125, 8.16225, 8.16505029296875, 8.17550439453125, 8.1688818359375, 8.17561767578125]",tokens/s,7.714387940380691,kWh,9.658040946623585e-05,5.293287202049287e-05,0.0003923776359739301,0.0005418909174606587,tokens/kWh,116259.56067915421,,s,629,82.76125488281265,0.13157592191226153,0.016262281457391132,0.129438720703125,0.13028741455078124,0.1309923309326172,0.26588740966796875,"[0.1330462646484375, 0.13207244873046875, 0.13133619689941406, 0.12958412170410155, 0.1294899139404297, 0.1299087371826172, 0.12952166748046876, 0.12904652404785155, 0.12932403564453124, 0.12959437561035156, 0.12922367858886719, 0.13178367614746095, 0.12983705139160157, 0.12965171813964843, 0.12943974304199218, 0.12977151489257813, 0.12933529663085938, 0.12956877136230469, 0.1295636444091797, 0.12963430786132812, 0.12953907775878906, 0.12950630187988282, 0.12945408630371094, 0.12915711975097657, 0.12958924865722657, 0.12955238342285155, 0.12918885803222657, 0.12946330261230468, 0.1300633544921875, 0.12945613098144532, 0.12942131042480468, 0.130693115234375, 0.1296414794921875, 0.12933836364746093, 0.129259521484375, 0.1293465576171875, 0.12949095153808593, 0.12932199096679686, 0.12919705200195314, 0.129396728515625, 0.12944178771972656, 0.12925645446777342, 0.12930149841308594, 0.12936805725097655, 
0.12971110534667968, 0.12947967529296875, 0.1293475799560547, 0.12937420654296874, 0.12945510864257812, 0.12943974304199218, 0.12923904418945312, 0.12946636962890626, 0.12930047607421874, 0.12932710266113281, 0.12942848205566407, 0.1293578186035156, 0.12930149841308594, 0.12927488708496093, 0.12947865295410158, 0.12933529663085938, 0.12928306579589843, 0.12944178771972656, 0.2679490661621094, 0.12944281005859376, 0.12929330444335937, 0.1295994873046875, 0.1293639678955078, 0.12922061157226564, 0.1296312255859375, 0.1300070343017578, 0.129291259765625, 0.1292584991455078, 0.12935987854003905, 0.12954214477539064, 0.12965989685058593, 0.12922880554199218, 0.12928102111816406, 0.13002546691894531, 0.13103411865234374, 0.12998860168457033, 0.12921856689453126, 0.12967832946777344, 0.12930764770507813, 0.1296117706298828, 0.12917861938476563, 0.12936805725097655, 0.12924006652832032, 0.13045452880859376, 0.12976127624511719, 0.12940185546875, 0.129154052734375, 0.129870849609375, 0.12965274047851563, 0.12968447875976563, 0.12930560302734376, 0.129406982421875, 0.12957696533203125, 0.12942848205566407, 0.12912844848632812, 0.12938957214355468, 0.12942131042480468, 0.1295319061279297, 0.12902093505859374, 0.12939776611328124, 0.12929023742675783, 0.1295431671142578, 0.12914994812011718, 0.131240966796875, 0.1315758056640625, 0.12955442810058593, 0.12934962463378907, 0.12950119018554687, 0.1294192657470703, 0.12930560302734376, 0.12917555236816405, 0.12942745971679687, 0.12936909484863282, 0.1293506622314453, 0.1292216339111328, 0.12930560302734376, 0.12932710266113281, 0.12941722106933592, 0.12931890869140625, 0.1293260803222656, 0.12920217895507813, 0.26541976928710936, 0.12952268981933593, 0.12942335510253905, 0.129438720703125, 0.12940083312988282, 0.12945613098144532, 0.12953599548339845, 0.12932095336914062, 0.1292216339111328, 0.12939776611328124, 0.12944178771972656, 0.12960153198242189, 0.129470458984375, 0.13039923095703124, 0.1294571533203125, 0.12955955505371095, 0.12928614807128908, 0.1294202880859375, 0.129259521484375, 0.12932095336914062, 0.12926258850097655, 0.12934144592285157, 0.12937420654296874, 0.12949708557128906, 0.12930149841308594, 0.12997734069824218, 0.12926771545410157, 0.1293711395263672, 0.12937318420410157, 0.13014016723632812, 0.1300695037841797, 0.12950630187988282, 0.12973773193359375, 0.12933529663085938, 0.12930458068847656, 0.1293711395263672, 0.12979302978515625, 0.12978994750976564, 0.12939366149902343, 0.13079551696777345, 0.12929330444335937, 0.12930458068847656, 0.12950425720214845, 0.1293291473388672, 0.12928306579589843, 0.12939468383789063, 0.12921139526367187, 0.13056614685058593, 0.1297909698486328, 0.12958822631835937, 0.12945613098144532, 0.12936703491210938, 0.1293086700439453, 0.12931173706054688, 0.12922265625, 0.129512451171875, 0.12923802185058594, 0.12934144592285157, 0.13001522827148437, 0.1295636444091797, 0.1292533721923828, 0.12930458068847656, 0.12924517822265624, 0.2659522705078125, 0.1293527069091797, 0.129328125, 0.12934860229492187, 0.1293148193359375, 0.12935679626464844, 0.12930560302734376, 0.12933734130859376, 0.12967628479003906, 0.12982579040527345, 0.12926463317871092, 0.12944793701171875, 0.12915916442871095, 0.12936090087890625, 0.1293506622314453, 0.1294264373779297, 0.12922061157226564, 0.12930047607421874, 0.12926258850097655, 0.1294448699951172, 0.12934349060058595, 0.129328125, 0.12933836364746093, 0.12944383239746093, 0.12915711975097657, 0.12933938598632813, 0.1303726043701172, 0.1295083465576172, 0.1293885498046875, 
0.12941107177734376, 0.12947763061523437, 0.12969778442382812, 0.12942848205566407, 0.12960972595214842, 0.13009408569335937, 0.1296506805419922, 0.12932095336914062, 0.12933631896972655, 0.1293148193359375, 0.12935577392578124, 0.12930149841308594, 0.129944580078125, 0.1299752960205078, 0.13082009887695312, 0.13123277282714843, 0.129976318359375, 0.12939776611328124, 0.12972647094726564, 0.1297725372314453, 0.12937522888183595, 0.12929843139648436, 0.12931686401367187, 0.12993843078613282, 0.12966297912597657, 0.129470458984375, 0.12965274047851563, 0.12924826049804689, 0.1299988555908203, 0.12943359375, 0.12968345642089843, 0.12955853271484374, 0.12938957214355468, 0.12927590942382813, 0.2658734130859375, 0.1293506622314453, 0.12944691467285158, 0.1293885498046875, 0.12947354125976562, 0.12939981079101562, 0.12928819274902345, 0.12991282653808595, 0.12973670959472655, 0.12963430786132812, 0.12942233276367188, 0.129364990234375, 0.12963226318359375, 0.12936090087890625, 0.12928819274902345, 0.12908441162109374, 0.12943565368652343, 0.1296312255859375, 0.1292093505859375, 0.12931993103027345, 0.12937216186523437, 0.1293834228515625, 0.12934553527832032, 0.12939263916015625, 0.12918988037109375, 0.12930252075195312, 0.1292359619140625, 0.12959027099609374, 0.12928306579589843, 0.12924313354492187, 0.1291550750732422, 0.129617919921875, 0.12926258850097655, 0.1292359619140625, 0.1293322296142578, 0.12940800476074218, 0.12928819274902345, 0.12947967529296875, 0.12928512573242187, 0.13084877014160157, 0.12966297912597657, 0.12936294555664063, 0.12929536437988282, 0.12989439392089844, 0.1293639678955078, 0.1298913269042969, 0.12968038940429688, 0.12977459716796874, 0.1292738494873047, 0.1299230651855469, 0.12984115600585938, 0.1298524169921875, 0.12960870361328125, 0.12983091735839844, 0.12971725463867187, 0.12981964111328126, 0.13102899169921875, 0.13176730346679688, 0.13048524475097656, 0.13046885681152343, 0.12975410461425782, 0.12993125915527343, 0.130081787109375, 0.2661335144042969, 0.1312348175048828, 0.13016166687011718, 0.12974490356445312, 0.12950425720214845, 0.12989849853515625, 0.13022105407714843, 0.13010943603515626, 0.12975205993652345, 0.13035621643066406, 0.12987493896484376, 0.1298892822265625, 0.13020057678222657, 0.13007872009277344, 0.12971827697753907, 0.12933427429199218, 0.1293506622314453, 0.12941619873046875, 0.12931277465820312, 0.12941311645507814, 0.12987596130371093, 0.12993434143066407, 0.12928921508789062, 0.12916940307617186, 0.12927897644042968, 0.129470458984375, 0.12943359375, 0.12916326904296874, 0.12934962463378907, 0.12948069763183595, 0.12918476867675782, 0.12958412170410155, 0.12934451293945312, 0.12950936889648437, 0.12927180480957032, 0.12952268981933593, 0.129328125, 0.1293578186035156, 0.13088050842285157, 0.12939878845214844, 0.12924006652832032, 0.12944178771972656, 0.12921856689453126, 0.12951962280273438, 0.12917759704589843, 0.12927488708496093, 0.12930047607421874, 0.1292359619140625, 0.12929638671875, 0.12937318420410157, 0.1292584991455078, 0.1293639678955078, 0.12952986145019532, 0.12940902709960939, 0.12924826049804689, 0.12945613098144532, 0.12931071472167968, 0.12931993103027345, 0.12928614807128908, 0.12941619873046875, 0.12934860229492187, 0.12924620056152344, 0.12929536437988282, 0.2658928527832031, 0.12976742553710938, 0.12933836364746093, 0.1294448699951172, 0.12936192321777343, 0.1305374755859375, 0.1294581756591797, 0.12936909484863282, 0.13020774841308594, 0.1295667266845703, 0.12935577392578124, 0.1293516845703125, 
0.12933836364746093, 0.12947456359863282, 0.13004389953613282, 0.13297048950195312, 0.12997836303710938, 0.12952268981933593, 0.1292922821044922, 0.12959642028808593, 0.12940185546875, 0.1293527069091797, 0.1293834228515625, 0.13002546691894531, 0.12959129333496094, 0.1294202880859375, 0.1293854675292969, 0.12959744262695314, 0.12931686401367187, 0.1293711395263672, 0.1293516845703125, 0.1293291473388672, 0.12929638671875, 0.1294264373779297, 0.13180621337890625, 0.12990361022949218, 0.1295431671142578, 0.12950323486328125, 0.1293824005126953, 0.12971417236328125, 0.1290260467529297, 0.129438720703125, 0.1293291473388672, 0.12946022033691407, 0.12919398498535156, 0.12934860229492187, 0.12931993103027345, 0.1294571533203125, 0.12999270629882811, 0.12952677917480468, 0.12935679626464844, 0.1295800323486328, 0.12942950439453124, 0.13058047485351562, 0.12943463134765626, 0.1296117706298828, 0.12902093505859374, 0.1293578186035156, 0.12931788635253907, 0.12939366149902343, 0.12926054382324217, 0.129364990234375, 0.1295247344970703, 0.26704998779296873, 0.13152255249023437, 0.12972236633300782, 0.1295697937011719, 0.12936703491210938, 0.12935987854003905, 0.13026611328125, 0.130302978515625, 0.12928819274902345, 0.129544189453125, 0.12924006652832032, 0.1293322296142578, 0.12927999877929688, 0.12953599548339845, 0.12984831237792968, 0.12931993103027345, 0.12948480224609374, 0.1293824005126953, 0.13099110412597656, 0.1302159423828125, 0.12943052673339844, 0.12947148132324218, 0.12942233276367188, 0.12940083312988282, 0.12913253784179687, 0.12946022033691407, 0.1293096923828125, 0.12943463134765626, 0.1309276123046875, 0.13064703369140626, 0.12926873779296874, 0.129396728515625, 0.13017190551757812, 0.12957901000976563, 0.12975001525878907, 0.13083033752441406, 0.13063987731933593, 0.12994560241699218, 0.12968345642089843, 0.12934349060058595, 0.129364990234375, 0.12946022033691407, 0.12915711975097657, 0.1293260803222656, 0.13071565246582031, 0.1304012756347656, 0.12986265563964844, 0.13138021850585938, 0.13076173400878907, 0.12949197387695313, 0.12930560302734376, 0.1294520263671875, 0.12977049255371093, 0.12957183837890626, 0.12964454650878907, 0.13065728759765624, 0.12959437561035156, 0.12931788635253907, 0.12932199096679686, 0.1294929962158203, 0.12976025390625, 0.1295626220703125, 0.12928306579589843, 0.2670551147460937, 0.13002035522460936, 0.12955033874511718, 0.12985139465332032, 0.12954725646972656, 0.12931277465820312, 0.12925645446777342, 0.13027226257324218, 0.1294694366455078, 0.1295308837890625, 0.1293824005126953, 0.12973260498046876, 0.1293701171875, 0.13028352355957032, 0.13105357360839845, 0.13017805480957031, 0.1294264373779297, 0.1304217529296875, 0.12934144592285157, 0.12950425720214845, 0.12933836364746093, 0.12946124267578124, 0.1294581756591797, 0.12975514221191406, 0.12941107177734376, 0.1294151611328125, 0.12950527954101562, 0.12952064514160155, 0.12930560302734376, 0.12943052673339844, 0.12936703491210938, 0.1306746826171875, 0.12937625122070312, 0.1297418212890625, 0.12938957214355468, 0.12986880493164063, 0.1292728271484375, 0.12953497314453125, 0.13047296142578124, 0.1296373748779297, 0.1293711395263672, 0.13099314880371093, 0.1300480041503906, 0.12995071411132814, 0.12928102111816406, 0.12935475158691406, 0.12918988037109375, 0.12929843139648436, 0.12932301330566406, 0.1293711395263672, 0.12942745971679687, 0.1291304931640625, 0.12926976013183594, 0.12948684692382811, 0.12928102111816406, 0.12931993103027345, 0.1292728271484375, 0.12934860229492187, 
0.1300746307373047, 0.1295749053955078, 0.13162393188476562, 0.12988621520996094, 0.1299630126953125, 0.2670069885253906, 0.1296711730957031, 0.12933734130859376, 0.129396728515625, 0.1302415313720703, 0.1293096923828125, 0.12932301330566406, 0.12936909484863282, 0.1292728271484375, 0.12964659118652344, 0.1292410888671875, 0.12936090087890625, 0.129227783203125, 0.12946124267578124, 0.12994969177246093, 0.12929536437988282, 0.12978585815429688, 0.12964659118652344, 0.1294264373779297, 0.1293639678955078, 0.12969676208496095, 0.12943052673339844, 0.1292349395751953, 0.1293148193359375, 0.1295564727783203, 0.12957594299316405, 0.13105255126953125, 0.131240966796875, 0.13059481811523438, 0.1293588409423828, 0.1293588409423828, 0.12939059448242188, 0.129328125, 0.12935577392578124, 0.12926463317871092, 0.1297838134765625, 0.13002546691894531, 0.12994969177246093, 0.1295677490234375, 0.12959642028808593, 0.12966297912597657, 0.12990054321289063, 0.1307125701904297, 0.12997222900390626, 0.13009202575683593, 0.12955955505371095, 0.12987187194824218, 0.13022822570800782, 0.1295114288330078, 0.12963328552246095, 0.12929638671875, 0.12961587524414062, 0.12946841430664063, 0.1294254150390625, 0.131989501953125, 0.131093505859375, 0.12946226501464844, 0.129470458984375, 0.13014732360839842, 0.13120204162597657, 0.13035110473632813, 0.12946124267578124, 0.13090509033203124]",tokens/s,7.600174754365983,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-66b,facebook/opt-66b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained 
dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 162.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-180B,tiiuae/falcon-180B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/tiiuae/falcon-180B/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = 
_request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667794a5-67a1bb2d20c486f85797fee2;52672858-7aff-4dcf-91f1-0af2a3718807) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/tiiuae/falcon-180B/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like tiiuae/falcon-180B is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-1.3B,,cuda,0,42,,,,,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,main,False,False,MB,1554.096128,1957.167104,0.0,1310.72,1163.82464,s,10,1.286997444152832,0.1286997444152832,0.0010709154419692667,0.12870168304443358,0.13024390106201172,0.13026507034301757,0.13028200576782226,"[0.13028623962402344, 0.13023919677734375, 0.1282391662597656, 0.12805363464355468, 0.1269244155883789, 0.12729046630859375, 0.12865869140625, 0.1287446746826172, 0.1295413055419922, 0.1290196533203125]",tokens/s,1989.1259393177147,kWh,1.4998026454163673e-06,8.218217431655106e-07,6.2844740852399835e-06,8.60609847382186e-06,tokens/kWh,29746347.985524923,MB,1554.096128,1959.264256,0.0,1312.817152,1232.774656,s,10,75.16664355468751,7.516664355468751,0.019324999879587898,7.517852294921875,7.5454459960937506,7.548604345703125,7.551131025390625,"[7.5517626953125, 7.5180322265625, 7.52255224609375, 7.52390966796875, 7.544744140625, 7.51767236328125, 7.49461669921875, 7.49106689453125, 7.50185205078125, 7.5004345703125]",tokens/s,8.3813772999142,kWh,8.89009176429398e-05,4.872418928564946e-05,0.00036540068437377195,0.0005030257913023612,tokens/kWh,125242.08716393956,,s,629,76.20247959136968,0.12114861620249542,0.01533479707935131,0.1192816619873047,0.12045271148681641,0.12072119903564453,0.24720068908691412,"[0.12236902618408203, 0.12192870330810547, 0.12110336303710938, 0.12021862030029297, 0.12056371307373047, 0.11989708709716797, 0.11929497528076172, 0.11829862213134766, 0.11859661102294922, 0.11861504364013672, 0.11821469116210938, 0.11968099212646484, 0.11959193420410157, 0.12045926666259765, 0.12021043395996094, 0.1199452133178711, 0.11970047760009765, 0.12003225708007813, 0.120163330078125, 0.11992678070068359, 0.11977011108398437, 0.11847372436523437, 0.11856588745117187, 0.11883724975585938, 0.11842662048339844, 0.11851676940917968, 0.11948745727539062, 0.11911475372314453, 0.119552001953125, 0.1215068130493164, 0.120257568359375, 0.1204623031616211, 0.12012236785888672, 0.12004659271240234, 0.12008038330078125, 0.12001689910888672, 0.11995954895019531, 0.12004761505126953, 0.12005683135986328, 0.12010189056396485, 0.12003635406494141, 0.11968307495117188, 0.12004863739013671, 0.12036300659179687, 0.11988992309570312, 0.1200558090209961, 0.1202534408569336, 0.12022169494628906, 0.12032921600341796, 0.12026265716552734, 0.12077056121826171, 0.12018585968017578, 0.11901235198974609, 0.1193861083984375, 0.11910348510742187, 0.11925606536865234, 0.11975270080566407, 0.12020941162109375, 0.11999846649169922, 0.12033843231201172, 0.11914854431152344, 0.12003942108154297, 0.2506158142089844, 0.12027391815185547, 0.11959091186523438, 0.11991551971435546, 0.1192816619873047, 0.11801292419433594, 0.11855052947998047, 0.11803955078125, 0.11798016357421875, 0.11803241729736329, 0.11873174285888671, 0.12019200134277344, 
0.11996057891845703, 0.11952947235107422, 0.11857612609863281, 0.11973939514160156, 0.11993702697753907, 0.11889049530029297, 0.11839385223388672, 0.11943730926513672, 0.11985203552246093, 0.11852082824707032, 0.11822898864746094, 0.11862739562988281, 0.11842758178710938, 0.11852185821533204, 0.1185054702758789, 0.11835596466064453, 0.11835699462890625, 0.11841331481933594, 0.11834674835205078, 0.11860070037841797, 0.12062003326416015, 0.11992473602294922, 0.12043059539794922, 0.12157746887207031, 0.12022579193115235, 0.11964927673339844, 0.12050534057617188, 0.12049407958984375, 0.12015615844726563, 0.11910553741455078, 0.12046438598632812, 0.1204510726928711, 0.12023603057861328, 0.11975885009765624, 0.12176998138427735, 0.12023705291748046, 0.12054937744140624, 0.11995750427246094, 0.12132864379882813, 0.11925917053222657, 0.1186283187866211, 0.11835699462890625, 0.11851776123046875, 0.11839794921875, 0.11879730987548828, 0.11951107025146485, 0.11866006469726563, 0.11900109100341796, 0.11836723327636718, 0.11853414154052734, 0.11843583679199218, 0.24882893371582032, 0.11982233428955077, 0.11939225769042969, 0.12049919891357422, 0.12002508544921875, 0.1198919677734375, 0.12023910522460937, 0.11994217681884765, 0.11870614624023437, 0.11836006164550782, 0.11821260833740234, 0.11851570892333985, 0.11971686553955078, 0.12000460815429688, 0.11984076690673828, 0.11931136322021485, 0.11905126190185547, 0.11986329650878906, 0.11975373077392579, 0.11980595397949219, 0.1196267547607422, 0.11956735992431641, 0.12024422454833984, 0.12106342315673828, 0.11977728271484375, 0.1197127685546875, 0.11865599822998046, 0.11831404876708984, 0.11899692535400391, 0.11852082824707032, 0.11829145812988281, 0.11852595520019531, 0.11846348571777343, 0.11836518096923829, 0.11844812774658203, 0.11833757019042969, 0.11830780792236328, 0.11964620971679688, 0.11897650909423828, 0.11840921783447265, 0.11968511962890625, 0.11841024017333984, 0.11969535827636718, 0.11970559692382812, 0.12029644775390624, 0.12001078033447266, 0.11961955261230468, 0.12051967620849609, 0.11861196899414063, 0.11842150115966797, 0.11837133026123046, 0.12028313446044922, 0.11990322875976563, 0.1188751983642578, 0.12033939361572266, 0.11918643188476563, 0.1187583999633789, 0.12003942108154297, 0.11934515380859376, 0.12007730865478515, 0.12025138854980469, 0.12019916534423829, 0.12015615844726563, 0.24848793029785157, 0.11997798156738282, 0.11991763305664062, 0.12037625885009766, 0.12008345794677734, 0.11944652557373046, 0.12011827087402344, 0.12008448028564453, 0.11987149047851563, 0.12019404602050782, 0.12016851043701172, 0.11977823638916016, 0.11987865447998047, 0.11973426818847656, 0.12008038330078125, 0.11942092895507812, 0.11997491455078126, 0.11880242919921875, 0.11979673767089843, 0.1197127685546875, 0.11939532470703125, 0.11976703643798828, 0.11997494506835937, 0.12019094085693359, 0.12064665222167968, 0.12001996612548828, 0.12060364532470703, 0.12040294647216797, 0.11997388458251954, 0.11948646545410156, 0.11998822021484375, 0.11993907165527344, 0.11955097961425781, 0.11953561401367188, 0.11927756500244141, 0.11897036743164062, 0.11999334716796875, 0.11905126190185547, 0.11846553802490234, 0.11841843414306641, 0.11854847717285157, 0.11835596466064453, 0.11830995178222656, 0.11828428649902344, 0.1203987808227539, 0.11818905639648437, 0.1193707504272461, 0.11814604949951171, 0.11847782135009766, 0.1183078384399414, 0.11799350738525391, 0.11804771423339844, 0.11817372894287109, 0.11848700714111328, 0.11852082824707032, 0.11849932861328125, 
0.12020735931396484, 0.120089599609375, 0.12004761505126953, 0.12012134552001953, 0.11869286346435547, 0.11855974578857421, 0.11863142395019531, 0.24572006225585938, 0.1183477783203125, 0.11846041870117187, 0.11811737823486328, 0.1181470718383789, 0.11830067443847657, 0.11928985595703125, 0.11911065673828125, 0.1185771484375, 0.11835187530517578, 0.12018994903564453, 0.12008550262451172, 0.11990534210205078, 0.1200219497680664, 0.12004659271240234, 0.11994931030273437, 0.12035686492919923, 0.12041522979736329, 0.12053298950195312, 0.12031999969482422, 0.12058419036865234, 0.12013158416748047, 0.12075929260253906, 0.11833036804199219, 0.1206794204711914, 0.12085862731933594, 0.12161433410644532, 0.11919667053222656, 0.1187041244506836, 0.11844300842285156, 0.11833650970458984, 0.11833856201171875, 0.1186355209350586, 0.11820236968994141, 0.12034559631347656, 0.12019712066650391, 0.12016947174072265, 0.11850035095214843, 0.12005375671386719, 0.12051763153076171, 0.11887615966796874, 0.11807539367675782, 0.11823411560058594, 0.11905741119384766, 0.12032307434082032, 0.1189775390625, 0.12029132843017579, 0.12084019470214843, 0.12052992248535156, 0.1207357406616211, 0.12054220581054688, 0.12078284454345703, 0.12055244445800781, 0.12059852600097656, 0.12064358520507812, 0.12082486724853515, 0.1207326431274414, 0.12082278442382813, 0.12053196716308594, 0.12073065948486328, 0.12090262603759766, 0.12064870452880859, 0.12058112335205078, 0.24601292419433593, 0.11817881774902343, 0.11808255767822265, 0.11800780487060547, 0.118255615234375, 0.11830067443847657, 0.11829145812988281, 0.11941478729248046, 0.12005375671386719, 0.1198202896118164, 0.12005785369873047, 0.11806412506103516, 0.11867750549316407, 0.11829145812988281, 0.11833548736572265, 0.11830989074707031, 0.11806822204589844, 0.11833241271972657, 0.11837747192382812, 0.11845120239257813, 0.11834572601318359, 0.11841228485107422, 0.11827609252929687, 0.11805286407470703, 0.11896729278564454, 0.11836313629150391, 0.11981619262695313, 0.12392044830322266, 0.1205656967163086, 0.12015821075439453, 0.11990016174316406, 0.12003533172607422, 0.11934003448486329, 0.1199974365234375, 0.1193154525756836, 0.11959500885009766, 0.11863040161132812, 0.1188106231689453, 0.12067327880859376, 0.11914444732666016, 0.11830477142333984, 0.11853721618652344, 0.1185269775390625, 0.11941990661621094, 0.11858329772949219, 0.1186693115234375, 0.12036608123779297, 0.12004557037353515, 0.1200558090209961, 0.12050841522216797, 0.12022681427001954, 0.12039065551757812, 0.12011212921142578, 0.12010495758056641, 0.12036608123779297, 0.1201817626953125, 0.12043673706054688, 0.12033843231201172, 0.12007526397705078, 0.12000972747802735, 0.12006505584716796, 0.11996975708007812, 0.11998822021484375, 0.2494791717529297, 0.12030156707763671, 0.11901952362060547, 0.11824127960205078, 0.1192816619873047, 0.11887308502197266, 0.11826175689697266, 0.11869388580322265, 0.11826687622070313, 0.11836211395263672, 0.11810918426513672, 0.11887104034423829, 0.11972198486328126, 0.11960012817382812, 0.1192468490600586, 0.118002685546875, 0.11875942230224609, 0.11941478729248046, 0.11890585327148437, 0.11843382263183594, 0.1191526107788086, 0.11959500885009766, 0.11970668792724609, 0.11957036590576171, 0.11933695983886719, 0.11909327697753906, 0.11842966461181641, 0.11905228424072266, 0.11911888122558593, 0.11894780731201172, 0.11848089599609375, 0.1181890869140625, 0.11840406036376953, 0.1190297622680664, 0.11982848358154297, 0.11972096252441407, 0.11864371490478516, 0.11924582672119141, 
0.11929804992675781, 0.11825459289550781, 0.11951718139648437, 0.11916287994384765, 0.11947417449951171, 0.11800064086914062, 0.11845017242431641, 0.11818089294433594, 0.11930210876464843, 0.1199810562133789, 0.11853517150878906, 0.11870310211181641, 0.11945164489746093, 0.11844096374511719, 0.1184194564819336, 0.118508544921875, 0.11926220703125, 0.12017356872558593, 0.11892838287353516, 0.1193021469116211, 0.11871849822998047, 0.11844911956787109, 0.11832115173339844, 0.11867545318603516, 0.11844403076171875, 0.24766259765625, 0.11916802978515625, 0.12123133087158203, 0.12028313446044922, 0.11920588684082031, 0.11884953308105468, 0.11862528228759765, 0.11994931030273437, 0.11886182403564453, 0.11885363006591797, 0.12013670349121094, 0.11913011169433593, 0.11992473602294922, 0.11888435363769531, 0.11946598052978516, 0.11882707214355469, 0.1207070083618164, 0.11865702056884765, 0.11839590454101563, 0.11839078521728516, 0.11947724914550781, 0.11919769287109375, 0.11886080169677735, 0.11848601531982422, 0.11876966094970703, 0.11810099029541016, 0.11880960083007812, 0.11817369842529298, 0.11927142333984375, 0.11965235137939453, 0.118761474609375, 0.11863654327392578, 0.11901439666748047, 0.11827814483642578, 0.11898368072509766, 0.11854233551025391, 0.11844915008544922, 0.11848397064208985, 0.11839078521728516, 0.11844608306884766, 0.11859455871582031, 0.11865190124511718, 0.11820441436767579, 0.11907686614990234, 0.11970867156982422, 0.11921715545654298, 0.11824230194091796, 0.11921817779541016, 0.11916390228271484, 0.11929804992675781, 0.11828326416015625, 0.11838361358642578, 0.11848499298095704, 0.11860991668701172, 0.11808153533935548, 0.11826588439941406, 0.11823715209960937, 0.11888639831542969, 0.11884031677246094, 0.11812454223632812, 0.11833446502685546, 0.11847577667236328, 0.11946905517578126, 0.24866712951660155, 0.11849318695068359, 0.11980188751220704, 0.11915773010253906, 0.11846041870117187, 0.1203589096069336, 0.1205381088256836, 0.11978342437744141, 0.11976601409912109, 0.11947417449951171, 0.11962163543701172, 0.11930316925048828, 0.11854541015625, 0.11897344207763672, 0.11885158538818359, 0.11875020599365234, 0.1181839370727539, 0.11931443023681641, 0.11823616027832032, 0.11900313568115234, 0.11816345977783203, 0.11854847717285157, 0.11989708709716797, 0.11902668762207032, 0.11941683197021484, 0.11941580963134765, 0.11852185821533204, 0.11982540893554687, 0.11974553680419922, 0.11874918365478515, 0.11918950653076171, 0.11970355224609375, 0.11946189117431641, 0.11857100677490234, 0.11914035034179687, 0.11940966033935548, 0.1185689926147461, 0.11831394958496094, 0.11820441436767579, 0.11945574188232422, 0.11997388458251954, 0.1203240966796875, 0.11903180694580077, 0.11818495941162109, 0.11810201263427735, 0.11788288116455078, 0.11807539367675782, 0.11814604949951171, 0.11880038452148438, 0.11919872283935547, 0.11881983947753906, 0.11975987243652343, 0.11975373077392579, 0.11953568267822266, 0.12003424072265625, 0.11991961669921875, 0.12044185638427735, 0.11880550384521485, 0.11889356994628907, 0.11842253112792969, 0.11843379211425781, 0.11815936279296875, 0.11811634826660156, 0.24846543884277345, 0.11927139282226562, 0.11798732757568359, 0.11792998504638671, 0.11840512084960937, 0.11863859558105469, 0.11815321350097656, 0.11805184173583984, 0.11871437072753906, 0.11836109161376954, 0.1180794906616211, 0.11815424346923828, 0.1181317138671875, 0.11853004455566406, 0.11842150115966797, 0.11837542724609375, 0.11854847717285157, 0.11832217407226563, 0.11862220764160156, 
0.11864985656738282, 0.11943526458740235, 0.11942912292480469, 0.11993395233154297, 0.12021145629882812, 0.12013568115234376, 0.12069990539550782, 0.12175667572021484, 0.12003942108154297, 0.11987763214111329, 0.11907788848876953, 0.1194076156616211, 0.11910553741455078, 0.11873177337646484, 0.11934413146972657, 0.11961958312988281, 0.11883519744873047, 0.11877273559570313, 0.11876761627197266, 0.12034662628173828, 0.12008140563964843, 0.11964415740966797, 0.12007014465332032, 0.12065280151367187, 0.11880754852294922, 0.11892736053466797, 0.11855974578857421, 0.11855052947998047, 0.11903590393066406, 0.11845938873291016, 0.11849523162841796, 0.1184716796875, 0.11835289764404297, 0.11853619384765625, 0.11834368133544922, 0.11937382507324219, 0.11912806701660156, 0.11848806762695313, 0.11961753845214844, 0.1190113296508789, 0.11965132904052735, 0.11860281372070312, 0.11934611511230468, 0.11991145324707031]",tokens/s,8.254324575433342,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-6.7b,EleutherAI/pythia-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1308.962816,6256.328704,0.0,5609.8816,5292.617216,s,10,5.571213256835938,0.5571213256835937,0.0007666840176307737,0.5568714599609375,0.557456005859375,0.5584093994140625,0.5591721142578125,"[0.55936279296875, 0.556702392578125, 0.5567649536132813, 0.5570609741210938, 0.5566168212890625, 0.5568453979492187, 0.557244140625, 0.5567742919921875, 0.5569439697265625, 0.5568975219726563]",tokens/s,459.5049376110048,kWh,6.58186607890659e-06,3.606554268359711e-06,3.10157809854445e-05,4.1204201332710805e-05,tokens/kWh,6212958.672172324,MB,1308.962816,6256.328704,0.0,5609.8816,5503.948288,s,10,327.20778125,32.720778124999995,0.005308491364446453,32.7202353515625,32.7266998046875,32.72907548828125,32.73097603515625,"[32.72102734375, 32.713080078125, 32.72320703125, 32.723677734375, 32.719443359375, 32.726171875, 32.7142578125, 32.71880859375, 32.71665625, 32.731451171875]",tokens/s,1.9253820847208232,kWh,0.00038621306252148413,0.00021167820178359155,0.0018056511605691545,0.00240354242487423,tokens/kWh,26211.31183207494,,s,629,331.6996517333982,0.527344438367883,0.06619643329461054,0.519362548828125,0.5196838745117187,0.5197864990234375,1.0765131103515626,"[0.5193318481445313, 0.5191546630859375, 0.5195264282226563, 0.5193011474609375, 0.519161865234375, 0.5189949340820312, 0.519024658203125, 0.5189488525390625, 0.51919873046875, 0.518950927734375, 0.5190645751953125, 0.5191629028320313, 0.5196636352539062, 0.5194833984375, 0.5193001098632812, 0.5194168090820312, 0.5192283935546875, 0.5194127807617187, 0.5191557006835937, 0.5193123779296875, 0.5200199584960937, 0.5194977416992187, 0.5196605224609375, 0.519141357421875, 0.5195980834960937, 0.5191690063476563, 0.5194024658203125, 0.51964208984375, 0.5193021240234375, 0.5191884765625, 0.5192212524414063, 0.5194844360351563, 0.5194926147460938, 0.5195140991210937, 0.519436279296875, 0.5195079956054688, 0.5191536865234375, 0.5194281005859375, 
0.5195120849609375, 0.5192755126953125, 0.5196728515625, 0.5195980834960937, 0.5193359375, 0.5193533325195312, 0.5194619140625, 0.5194895629882812, 0.5192806396484375, 0.5191629028320313, 0.51945166015625, 0.519245849609375, 0.5195632934570312, 0.519736328125, 0.5192724609375, 0.5192140502929687, 0.5193277587890625, 0.5196113891601563, 0.5192888793945313, 0.519089111328125, 0.5195858154296875, 0.519647216796875, 0.5195612182617187, 0.5193564453125, 1.0763663330078126, 0.518898681640625, 0.5189488525390625, 0.5190789184570312, 0.5190236206054688, 0.5189979858398438, 0.5195140991210937, 0.5191475219726562, 0.5191536865234375, 0.5191444702148438, 0.5191588134765625, 0.519103515625, 0.5190625, 0.5190420532226563, 0.5190901489257812, 0.5192744750976562, 0.5193707275390625, 0.5191854248046875, 0.5194649658203125, 0.5193666381835937, 0.5193236694335938, 0.5194281005859375, 0.5191649169921875, 0.5191342163085938, 0.519046142578125, 0.5191383056640625, 0.5194874877929687, 0.5194271240234375, 0.5192376098632813, 0.5192274169921876, 0.5190830078125, 0.519161865234375, 0.51907275390625, 0.51944140625, 0.5194660034179688, 0.5193697509765625, 0.5191290893554688, 0.5192161254882812, 0.51917822265625, 0.51928271484375, 0.5191895141601562, 0.5192591552734375, 0.5195140991210937, 0.51934619140625, 0.5191177978515625, 0.51966259765625, 0.5194188842773437, 0.5195499267578125, 0.5193359375, 0.5193554077148438, 0.5193215942382813, 0.5193707275390625, 0.5193646240234375, 0.5194526977539062, 0.5195642700195312, 0.5192232666015625, 0.5191710815429688, 0.519193603515625, 0.5191710815429688, 0.519245849609375, 0.5192489013671875, 0.519278564453125, 0.5190645751953125, 1.076937744140625, 0.5192662963867187, 0.5193164672851562, 0.5193584594726562, 0.5193011474609375, 0.5191680297851563, 0.5191188354492188, 0.5191270141601563, 0.5195693969726562, 0.5193901977539063, 0.5192611694335938, 0.519352294921875, 0.519352294921875, 0.5192232666015625, 0.5193594970703125, 0.51930419921875, 0.519245849609375, 0.5191546630859375, 0.5192427368164062, 0.5194291381835937, 0.5196943359375, 0.519520263671875, 0.5194066162109375, 0.5193318481445313, 0.5193011474609375, 0.5194178466796875, 0.5193533325195312, 0.5193809814453125, 0.5195612182617187, 0.5193554077148438, 0.5194926147460938, 0.5192898559570313, 0.5194373168945312, 0.5194137573242188, 0.519341064453125, 0.5194496459960938, 0.5192283935546875, 0.51919873046875, 0.5195048828125, 0.5196339111328125, 0.5195560913085937, 0.519572509765625, 0.51938916015625, 0.5192929077148437, 0.5191874389648438, 0.5194547119140625, 0.5193114013671875, 0.5195682983398437, 0.5193922729492187, 0.5194547119140625, 0.5196246948242188, 0.5193451538085937, 0.5195867919921875, 0.5193400268554688, 0.5194721069335938, 0.5192376098632813, 0.51959912109375, 0.5194547119140625, 0.5198776245117187, 0.5195980834960937, 0.519794677734375, 0.5196093139648438, 0.5194988403320312, 1.07694482421875, 0.51944140625, 0.5193348999023437, 0.519572509765625, 0.5193656616210938, 0.5192765502929687, 0.5190225830078125, 0.5192960205078125, 0.5193001098632812, 0.519414794921875, 0.5196113891601563, 0.5192478637695312, 0.5194208984375, 0.5195591430664063, 0.5194547119140625, 0.5194055786132813, 0.5193932495117187, 0.5193236694335938, 0.519299072265625, 0.519203857421875, 0.519172119140625, 0.519413818359375, 0.5194751586914063, 0.5194956665039062, 0.5193031616210938, 0.5192714233398438, 0.5192642822265625, 0.5195704345703125, 0.5194055786132813, 0.5192109985351563, 0.5192069702148437, 0.5193225708007813, 0.5193994140625, 
0.5194393310546875, 0.5193717651367188, 0.51938916015625, 0.5194823608398438, 0.5195161743164063, 0.5194229736328125, 0.5193123779296875, 0.5194803466796875, 0.5196267700195313, 0.5195346069335938, 0.5195284423828125, 0.5194066162109375, 0.5194495849609375, 0.5196309204101562, 0.5191699829101563, 0.5192222900390625, 0.5191680297851563, 0.5192140502929687, 0.5192509155273437, 0.5197455444335938, 0.51945166015625, 0.5199011840820312, 0.5196165161132813, 0.5196021728515625, 0.51944140625, 0.519468017578125, 0.5196871948242188, 0.5198428344726562, 0.5196503295898437, 0.5193164672851562, 1.076336669921875, 0.5190471801757812, 0.51890380859375, 0.5190809326171875, 0.5190072021484375, 0.5190051879882812, 0.519067626953125, 0.5190942993164063, 0.5192550659179688, 0.5191608276367188, 0.51911474609375, 0.5190000610351563, 0.5190625, 0.5189099731445312, 0.5189284057617187, 0.5190532836914062, 0.5191976928710937, 0.5195181884765625, 0.5195192260742187, 0.5192243041992187, 0.5194373779296875, 0.5194208374023438, 0.5193380126953125, 0.51938818359375, 0.5193707275390625, 0.5193901977539063, 0.5194732055664063, 0.5196451416015625, 0.5194977416992187, 0.5196513061523438, 0.5195796508789062, 0.5192212524414063, 0.5195172119140625, 0.5192601318359376, 0.5197967529296875, 0.5195601806640625, 0.5197598876953125, 0.5193154296875, 0.5192755126953125, 0.5190604858398438, 0.519140380859375, 0.519161865234375, 0.5192714233398438, 0.5192130737304688, 0.519172119140625, 0.5193922729492187, 0.5197404174804687, 0.5195489501953126, 0.5196461791992187, 0.5194475708007813, 0.5194373168945312, 0.5194741821289063, 0.5194240112304688, 0.5196431274414063, 0.51991552734375, 0.5195888671875, 0.5194813232421875, 0.5196830444335937, 0.5193574829101563, 0.5195447387695312, 0.519552001953125, 0.5194495849609375, 0.5193912353515625, 1.0770308837890625, 0.519299072265625, 0.519161865234375, 0.519352294921875, 0.519202880859375, 0.519232421875, 0.5192161254882812, 0.5191495971679687, 0.5193451538085937, 0.5194649658203125, 0.5192969970703125, 0.51949365234375, 0.5189734497070313, 0.5189652709960938, 0.5190738525390625, 0.5190205078125, 0.5188731079101563, 0.5189837036132813, 0.5191946411132813, 0.5191895141601562, 0.5194915771484375, 0.5194823608398438, 0.5194649658203125, 0.5193861083984375, 0.519541748046875, 0.5195222778320312, 0.5193441162109375, 0.5196687622070313, 0.5194107055664062, 0.5194291381835937, 0.5196728515625, 0.519736328125, 0.5194956665039062, 0.5196932983398438, 0.51970458984375, 0.5194557495117188, 0.5194752197265625, 0.5195489501953126, 0.5196452026367188, 0.51976904296875, 0.5196585693359375, 0.5195037841796875, 0.5197609252929688, 0.5197005004882812, 0.5194229736328125, 0.51953564453125, 0.5195899047851562, 0.5200025634765625, 0.5196328735351563, 0.5195530395507812, 0.5195929565429688, 0.5197322387695312, 0.519456787109375, 0.5192703857421875, 0.5194905395507813, 0.5194588012695313, 0.5194240112304688, 0.5194588012695313, 0.519572509765625, 0.5200353393554688, 0.5195612182617187, 0.5195438232421875, 0.5196062622070312, 1.0774559326171875, 0.5191044921875, 0.5191157836914062, 0.5191874389648438, 0.5194168090820312, 0.519161865234375, 0.5189949340820312, 0.5192335205078125, 0.5191475219726562, 0.5193502807617187, 0.5192642822265625, 0.5191946411132813, 0.5190123291015625, 0.5190532836914062, 0.5194086303710937, 0.5190615234375, 0.5189734497070313, 0.5190645751953125, 0.5192376098632813, 0.5190942993164063, 0.5191700439453125, 0.5191280517578125, 0.5192765502929687, 0.5192151489257812, 0.5193707275390625, 
0.5190543212890625, 0.5191290893554688, 0.5193359375, 0.5194874877929687, 0.5193267211914062, 0.5189846801757813, 0.51919873046875, 0.5191116943359375, 0.5194178466796875, 0.51928369140625, 0.5192315063476562, 0.51917724609375, 0.5193430786132812, 0.5191895141601562, 0.5191710815429688, 0.5193011474609375, 0.5194956665039062, 0.5192806396484375, 0.519357421875, 0.5193850708007812, 0.5195530395507812, 0.5191905517578125, 0.519109619140625, 0.5193072509765625, 0.5192550659179688, 0.5195325317382813, 0.5192079467773437, 0.5195632934570312, 0.5195438232421875, 0.5192283935546875, 0.5193164672851562, 0.5193871459960937, 0.519130126953125, 0.5190543212890625, 0.519066650390625, 0.519762939453125, 0.5196871948242188, 0.51964208984375, 1.0765701904296876, 0.5193727416992188, 0.5190225830078125, 0.51900927734375, 0.51913525390625, 0.5190553588867187, 0.5191137084960937, 0.519404541015625, 0.5193369750976562, 0.5193421020507812, 0.5189796142578125, 0.5190471801757812, 0.5191260375976563, 0.5190000610351563, 0.5188761596679687, 0.5193082885742187, 0.5192929077148437, 0.5189151000976563, 0.5193871459960937, 0.5191874389648438, 0.5193277587890625, 0.5193389892578125, 0.5194967041015625, 0.5193380126953125, 0.518867919921875, 0.5191762084960938, 0.5194178466796875, 0.5198345947265625, 0.5194864501953125, 0.519278564453125, 0.5193430786132812, 0.5191434326171875, 0.51901953125, 0.519066650390625, 0.5193963623046876, 0.5190963134765625, 0.51917822265625, 0.5191843872070312, 0.5193840942382812, 0.5189468383789062, 0.5197189331054688, 0.5195069580078125, 0.5195929565429688, 0.5196318969726562, 0.5192345581054687, 0.5199708251953125, 0.5198817138671875, 0.5194895629882812, 0.5193318481445313, 0.51960009765625, 0.519520263671875, 0.5197168579101562, 0.5195089721679688, 0.5197127685546875, 0.5197619018554688, 0.5193789672851562, 0.5196011352539063, 0.5199605712890625, 0.5192171630859375, 0.519773193359375, 0.5193154296875, 0.5193421020507812, 0.5191127319335938, 1.076674560546875, 0.5193175048828125, 0.5198510131835937, 0.5194219360351563, 0.5192171630859375, 0.5192335205078125, 0.5190491943359375, 0.5191076049804687, 0.519288818359375, 0.519351318359375, 0.51945166015625, 0.5192109985351563, 0.5194281005859375, 0.5190523071289063, 0.518887451171875, 0.5189160766601563, 0.5193380126953125, 0.5191219482421875, 0.5194137573242188, 0.5189949340820312, 0.5194024658203125, 0.5194833984375, 0.5191393432617187, 0.5193871459960937, 0.5191823120117187, 0.519035888671875, 0.5191137084960937, 0.5192263793945312, 0.5193850708007812, 0.5195980834960937, 0.519099365234375, 0.5191802978515625, 0.519103515625, 0.5190819702148437, 0.5191076049804687, 0.5194823608398438, 0.519362548828125, 0.5193380126953125, 0.5192847290039062, 0.5194066162109375, 0.5196646118164062, 0.519319580078125, 0.5197086791992187, 0.5193389892578125, 0.5191321411132812, 0.5191423950195313, 0.5191177978515625, 0.51974658203125, 0.5195028686523437, 0.5193082885742187, 0.5194967041015625, 0.5192734985351563, 0.5193185424804687, 0.5191393432617187, 0.5193789672851562, 0.5193001098632812, 0.519245849609375, 0.5193215942382813, 0.5193871459960937, 0.5200670776367188, 0.5194752197265625, 0.5193564453125, 0.519161865234375, 1.0765865478515626, 0.5188423461914062, 0.5193768920898437, 0.5195335693359375, 0.5195069580078125, 0.5196103515625, 0.5192714233398438, 0.5193687133789062, 0.5194761962890625, 0.5191976928710937, 0.5193594970703125, 0.5194977416992187, 0.5192212524414063, 0.5192960205078125, 0.5195693969726562, 0.51974755859375, 0.519372802734375, 
0.5196605224609375, 0.5194926147460938, 0.5194332275390625, 0.5192581176757812, 0.5195612182617187, 0.519888916015625, 0.52008251953125, 0.519522216796875, 0.519762939453125, 0.5197742309570312, 0.5199441528320312, 0.5196011352539063, 0.5197128295898438, 0.519615478515625, 0.5195438232421875, 0.5197282104492188, 0.52025439453125, 0.52010595703125, 0.5197557983398438, 0.5197957153320313, 0.5193380126953125, 0.5193861083984375, 0.5192591552734375, 0.5193103637695312, 0.5194598388671875, 0.519741455078125, 0.5195346069335938, 0.519635986328125, 0.5194772338867187, 0.5194158325195313, 0.5195806884765625, 0.5193267211914062, 0.5193666381835937, 0.5194485473632813, 0.5195233154296875, 0.5197117309570313, 0.519857177734375, 0.519552001953125, 0.519488525390625, 0.5193113403320313, 0.519372802734375, 0.5194302368164062, 0.5193245849609375, 0.5197352905273438, 0.51986328125, 0.5195817260742187]",tokens/s,1.8962938209701676,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-7B,Qwen/Qwen1.5-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2832.146432,8389.132288,0.0,7742.685184,7007.0144,s,10,5.830655212402344,0.5830655212402344,0.0011864011955084602,0.5831084289550781,0.5846742309570313,0.5847881042480468,0.5848792028808594,"[0.5832706909179688, 0.5849019775390625, 0.5821006469726563, 0.5817232055664062, 0.582320068359375, 0.5811737670898437, 0.5829461669921875, 0.5834404907226562, 0.5841292724609375, 0.58464892578125]",tokens/s,439.05871754423805,kWh,6.867315049892591e-06,3.7630052303029514e-06,3.309072091699999e-05,4.372104119719553e-05,tokens/kWh,5855304.288051151,MB,2832.146432,8389.132288,0.0,7742.685184,7283.984384,s,10,342.646671875,34.264667187499995,0.008129823876006116,34.26347265625,34.275117968749996,34.278303125,34.28085125,"[34.26221484375, 34.28148828125, 34.27441015625, 34.256734375, 34.26859765625, 34.26473046875, 34.2578125, 34.267296875, 34.25966015625, 34.2537265625]",tokens/s,1.8386286857904421,kWh,0.0004044825890237167,0.00022169153025011835,0.0019209350367467965,0.002547109156020632,tokens/kWh,24733.92231781122,,s,629,347.34827374267576,0.5522230107196754,0.06935218607240677,0.5438034057617187,0.5445187377929688,0.5449166748046874,1.1266134716796876,"[0.54485400390625, 0.5438034057617187, 0.543246337890625, 0.5442498779296875, 0.5438187255859375, 0.544405517578125, 0.5433927612304688, 0.5439907836914063, 0.5433046875, 0.5437378540039063, 0.5438269653320312, 0.5440072021484375, 0.543795166015625, 0.544226318359375, 0.5433211059570312, 0.5441300659179688, 0.54451708984375, 0.5443307495117188, 0.5436416015625, 0.5442396240234375, 0.5436170043945312, 0.545089599609375, 0.543943603515625, 0.5440245971679688, 0.5435012817382813, 0.54389453125, 0.543447021484375, 0.544269287109375, 0.5434480590820312, 0.5442416381835937, 0.5436282958984375, 0.5436856079101563, 0.5433425903320312, 0.5441137084960938, 0.5437623901367188, 0.5438228759765625, 0.54314599609375, 0.543266845703125, 0.5434480590820312, 0.54394677734375, 
0.543478759765625, 0.544342041015625, 0.5433477172851563, 0.5437071533203125, 0.5434163208007813, 0.5436990356445313, 0.5434121704101562, 0.5436589965820312, 0.5431746826171875, 0.5436907348632812, 0.5431572265625, 0.5437921142578125, 0.5443922119140625, 0.5438034057617187, 0.5433538818359375, 0.5437992553710937, 0.54312548828125, 0.5436221313476562, 0.5442550048828125, 0.5447546997070313, 0.5435003051757813, 0.544110595703125, 1.129606201171875, 0.5438555908203125, 0.5446000366210938, 0.5434962158203125, 0.5441279907226563, 0.5435760498046875, 0.5443614501953125, 0.5440512084960938, 0.5445314331054687, 0.544, 0.544395263671875, 0.5442437133789062, 0.5449338989257813, 0.5445150756835937, 0.5455985107421875, 0.5442651977539062, 0.5440481567382812, 0.5437183837890625, 0.5441484985351562, 0.5433558959960938, 0.544912353515625, 0.5442252807617187, 0.544775146484375, 0.5442785034179688, 0.5442467651367188, 0.5443768310546875, 0.544701416015625, 0.5438463745117188, 0.5441720581054688, 0.543763427734375, 0.5439447021484375, 0.5434183959960938, 0.5442539672851563, 0.543873046875, 0.5442447509765626, 0.5435484008789062, 0.544189453125, 0.5435852661132813, 0.5441228637695312, 0.5439774780273438, 0.5444413452148438, 0.5440932006835938, 0.5443020629882812, 0.5437276000976563, 0.5443696899414062, 0.5436436767578126, 0.5473484497070312, 0.5439129638671875, 0.5444976806640625, 0.54388427734375, 0.5441033935546875, 0.5436795043945313, 0.5439283447265625, 0.5437163696289062, 0.5439866943359375, 0.5433651123046875, 0.5438566284179688, 0.5434593505859375, 0.5443276977539062, 0.5436395263671875, 0.54405224609375, 0.5434828491210938, 0.5446829833984375, 1.1269549560546874, 0.5436589965820312, 0.5440983276367187, 0.5438801879882813, 0.5444874267578125, 0.544152587890625, 0.5444679565429688, 0.5439887084960937, 0.5441515502929688, 0.5438320922851563, 0.54458984375, 0.5438555908203125, 0.544068603515625, 0.5435648193359375, 0.5439713134765625, 0.5431132202148438, 0.5440512084960938, 0.54389453125, 0.5443778686523437, 0.543446044921875, 0.5438597412109375, 0.5438648071289063, 0.5439907836914063, 0.5458063354492187, 0.5444915161132813, 0.543572998046875, 0.5443184814453125, 0.5437357788085937, 0.543878173828125, 0.5435125732421875, 0.5439928588867188, 0.5434962158203125, 0.544068603515625, 0.5433712768554687, 0.5437890625, 0.5446963500976563, 0.5459650268554688, 0.5440255737304688, 0.5438883666992187, 0.5443931884765625, 0.5446287231445313, 0.5444362182617187, 0.5449840698242188, 0.5441444091796875, 0.5444034423828125, 0.543510498046875, 0.5440491333007812, 0.5454448852539062, 0.5443717041015625, 0.5432412109375, 0.5437112426757813, 0.5432013549804687, 0.5434971313476562, 0.5437726440429688, 0.5439866943359375, 0.5433477172851563, 0.5437194213867188, 0.5431173095703125, 0.5434132690429687, 0.5431746826171875, 0.544648193359375, 0.5442304077148438, 0.5442119750976563, 1.1263170166015626, 0.5434583129882813, 0.5442969360351563, 0.543373291015625, 0.5437562866210938, 0.543373291015625, 0.5437644653320313, 0.5435750122070313, 0.5440972900390625, 0.5432647705078125, 0.5439580078125, 0.5433252563476563, 0.5438104858398437, 0.5432924194335937, 0.5439324340820313, 0.5438750610351563, 0.5440696411132813, 0.543283203125, 0.543783935546875, 0.54326171875, 0.5440430297851563, 0.543267822265625, 0.5438822631835938, 0.54329443359375, 0.5437296752929688, 0.5433179931640625, 0.5443358764648437, 0.5437020263671875, 0.5440706787109375, 0.5436016845703125, 0.5435985717773437, 0.54325244140625, 0.5440829467773437, 0.5445253295898438, 
0.5435402221679687, 0.54327294921875, 0.5437296752929688, 0.5436436767578126, 0.54380029296875, 0.5434234619140625, 0.5437747192382812, 0.5434429321289063, 0.5436068115234375, 0.543056884765625, 0.5473935546875, 0.54410546875, 0.5440604248046875, 0.5434931030273438, 0.5439365234375, 0.5433773803710937, 0.5438289794921874, 0.5436354370117188, 0.5443809204101563, 0.5432913818359375, 0.5437869873046876, 0.5433190307617187, 0.5438975830078125, 0.54358837890625, 0.543973388671875, 0.5433487548828125, 0.5437777709960937, 0.5435279541015625, 0.5438392333984375, 1.126728759765625, 0.5433446655273437, 0.544162841796875, 0.5432760620117187, 0.5441720581054688, 0.5443881225585937, 0.54490625, 0.54394677734375, 0.5442590942382812, 0.5439252319335938, 0.544701416015625, 0.5439826049804688, 0.5446226196289062, 0.5433385009765626, 0.5438064575195313, 0.5434009399414063, 0.5440983276367187, 0.5450567626953124, 0.5439652099609374, 0.5432319946289063, 0.5438095092773437, 0.5432586059570312, 0.5437614135742187, 0.543435791015625, 0.544484375, 0.5434317016601562, 0.5437009887695312, 0.5431490478515625, 0.5438167114257813, 0.543298583984375, 0.5435801391601562, 0.5435494995117187, 0.5441719970703125, 0.5433681640625, 0.5436928100585937, 0.5437245483398437, 0.5445037841796875, 0.5434193725585937, 0.5443307495117188, 0.543531005859375, 0.5440133056640625, 0.5436139526367187, 0.5439487915039063, 0.54411572265625, 0.5449932861328125, 0.5437962036132813, 0.5439794921875, 0.543466552734375, 0.5439456787109375, 0.543562744140625, 0.5440255737304688, 0.5437542114257813, 0.5439508666992188, 0.5435565795898437, 0.5450424194335938, 0.5437081298828125, 0.5441198120117188, 0.5460613403320312, 0.5440328369140625, 0.5436876220703125, 0.5439970092773437, 0.5443685913085937, 0.5439518432617187, 1.1256268310546875, 0.5433876342773437, 0.5439027099609375, 0.5437491455078125, 0.5440143432617187, 0.5435873413085938, 0.5440634765625, 0.5435217895507812, 0.544837646484375, 0.5441290283203125, 0.5442662353515625, 0.5436077880859375, 0.5440040893554687, 0.5432811279296875, 0.5437716674804688, 0.5439754028320313, 0.543921142578125, 0.5436795043945313, 0.5448601684570312, 0.543963134765625, 0.5444270629882813, 0.5435064086914062, 0.5439723510742187, 0.5439713134765625, 0.5442775268554687, 0.5437449951171875, 0.544595947265625, 0.5438966064453125, 0.5437860107421875, 0.5432279052734375, 0.5435350952148438, 0.5433651123046875, 0.5438668823242188, 0.5433426513671875, 0.5449727783203125, 0.5435340576171875, 0.5439334106445313, 0.54326171875, 0.5467739868164062, 0.5434767456054688, 0.5440921630859376, 0.5435054931640625, 0.544133056640625, 0.5435484008789062, 0.5440020751953125, 0.5437808837890625, 0.5440993041992187, 0.5436477661132812, 0.5438197631835937, 0.5434757690429688, 0.54377978515625, 0.5431531372070313, 0.5442908325195313, 0.5436088256835937, 0.5439539184570312, 0.54348291015625, 0.54430517578125, 0.5432504272460937, 0.5439170532226563, 0.5432053833007813, 0.5438597412109375, 0.5436006469726562, 0.543825927734375, 1.1284039306640625, 0.5435811767578125, 0.54392626953125, 0.5432975463867188, 0.5440030517578125, 0.5434276123046875, 0.5444700317382812, 0.5436016845703125, 0.5438228759765625, 0.5433507690429688, 0.5437460327148438, 0.5433599853515625, 0.5435975952148437, 0.5432709350585937, 0.54407373046875, 0.5433487548828125, 0.5436477661132812, 0.5439201049804687, 0.544205810546875, 0.5434214477539062, 0.543847412109375, 0.5432391967773438, 0.5437501220703125, 0.54501171875, 0.543856689453125, 0.5445160522460938, 0.544321533203125, 
0.5439661865234375, 0.5444495849609375, 0.5435463256835937, 0.543562744140625, 0.5435699462890625, 0.5438617553710937, 0.5433108520507812, 0.5436497802734375, 0.5432432861328125, 0.5446655883789062, 0.5437214965820313, 0.5437655029296875, 0.543151123046875, 0.5436876831054688, 0.54325146484375, 0.5437511596679687, 0.5447864379882813, 0.5438218994140624, 0.5431879272460938, 0.543825927734375, 0.543182861328125, 0.5435903930664062, 0.54308349609375, 0.5436334228515625, 0.5431613159179688, 0.5436016845703125, 0.5431839599609375, 0.5440931396484375, 0.544869384765625, 0.543978515625, 0.5436641235351563, 0.5438402709960938, 0.54329443359375, 0.5435975952148437, 0.5432872924804687, 0.5449031982421875, 1.12785302734375, 0.5432197265625, 0.5443031005859374, 0.543805419921875, 0.5440696411132813, 0.543562744140625, 0.5437880249023438, 0.5434265747070313, 0.54437890625, 0.5443410034179688, 0.5438085327148438, 0.5433548583984374, 0.5437552490234375, 0.5441167602539062, 0.5438873901367187, 0.5431286010742188, 0.5460684814453125, 0.5433446655273437, 0.54401123046875, 0.5432525024414062, 0.5443113403320312, 0.5433211059570312, 0.5440449829101562, 0.54329443359375, 0.5437286376953125, 0.5433364868164062, 0.5437962036132813, 0.5443225708007813, 0.544879638671875, 0.543446044921875, 0.543447021484375, 0.5436170043945312, 0.5437798461914063, 0.543077392578125, 0.5447864379882813, 0.5435914306640625, 0.5437706298828126, 0.5436293334960938, 0.5449195556640625, 0.5433282470703125, 0.544373779296875, 0.543984619140625, 0.54413720703125, 0.5432801513671875, 0.5436323852539062, 0.5439119262695312, 0.5450363159179688, 0.543909912109375, 0.5446492309570312, 0.5441402587890625, 0.5445682983398438, 0.5445365600585937, 0.5437798461914063, 0.5442201538085938, 0.5439365234375, 0.5434225463867187, 0.5440900268554687, 0.5439365844726562, 0.5439845581054688, 0.5436405639648437, 0.5438587036132813, 0.5438146362304688, 0.5440071411132813, 1.1298836669921875, 0.5435586547851563, 0.5440675659179688, 0.5432913818359375, 0.5438289794921874, 0.5433005981445312, 0.5437767944335937, 0.544300048828125, 0.54409521484375, 0.543405029296875, 0.54519091796875, 0.5432658081054688, 0.5438023681640625, 0.5436221313476562, 0.5439180908203125, 0.5432565307617188, 0.5435023193359375, 0.5447024536132813, 0.54401953125, 0.5435442504882813, 0.5441484985351562, 0.5433333740234375, 0.5439180908203125, 0.5433262329101562, 0.5437921142578125, 0.5436846313476562, 0.5451581420898437, 0.54331494140625, 0.5439129638671875, 0.5431747436523438, 0.5436364135742188, 0.543467529296875, 0.5444925537109375, 0.5434685668945313, 0.5439539184570312, 0.543331298828125, 0.5441863403320313, 0.5441474609375, 0.5438177490234375, 0.5437880249023438, 0.5436282958984375, 0.5433865966796875, 0.5437327270507812, 0.5433026733398437, 0.5443440551757812, 0.5432484130859375, 0.5437440185546875, 0.5434849243164063, 0.5438463745117188, 0.5431654663085937, 0.5436897583007813, 0.5432340698242187, 0.5437265625, 0.5432975463867188, 0.543541259765625, 0.5436201171875, 0.5445703735351562, 0.543752197265625, 0.5440389404296875, 0.5439641723632812, 0.5437122802734375, 0.543457275390625, 0.5449390258789063, 1.1290306396484375, 0.5434531860351562, 0.5440993041992187, 0.5435391845703125, 0.5443635864257812, 0.54331591796875, 0.5436958618164063, 0.54315625, 0.54371533203125, 0.5437439575195312, 0.544100341796875, 0.5435381469726562, 0.5437388916015625, 0.5432217407226563, 0.5436754150390625, 0.54318896484375, 0.5440829467773437, 0.544564208984375, 0.543515625, 0.5431910400390625, 
0.5438494873046875, 0.54326171875, 0.5435248413085938, 0.5433456420898437, 0.5437112426757813, 0.5435238647460937, 0.5456578369140626, 0.5435504760742188, 0.5442672729492187, 0.543446044921875, 0.54443212890625, 0.5432156372070313, 0.5440870971679688, 0.5433077392578125, 0.5435750122070313, 0.5440040893554687, 0.5438587036132813, 0.5430794067382813, 0.5436282958984375, 0.5436846313476562, 0.5436190795898438, 0.5430947875976563, 0.5436282958984375, 0.5432340698242187, 0.5435166625976563, 0.5432954711914062, 0.5439907836914063, 0.5439354858398437, 0.5442140502929688, 0.543595458984375, 0.544216064453125, 0.5438013305664062, 0.5442723999023438, 0.5444034423828125, 0.5439794921875, 0.54333642578125, 0.5436118774414063, 0.5431787719726563, 0.5439519653320313, 0.5432032470703125, 0.5435668334960938, 0.5430866088867188, 0.5438269653320312]",tokens/s,1.810862605483909,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.4b,EleutherAI/pythia-1.4b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.0,,,,,,MB,1625.034752,2254.962688,0.0,1608.515584,1463.6928,s,10,1.2100343170166015,0.12100343170166014,0.001022141281647672,0.12073452758789063,0.12208388671874999,0.1227883918762207,0.12335199600219726,"[0.1234928970336914, 0.12192733001708984, 0.12002118682861328, 0.12029216003417968, 0.12038220977783202, 0.1198985595703125, 0.1206871337890625, 0.12078192138671875, 0.12138086700439453, 0.12117005157470703]",tokens/s,2115.6424772413106,kWh,1.4169729851856437e-06,7.764314618920512e-07,6.3009111253815796e-06,8.494315572459274e-06,tokens/kWh,30137801.90013389,MB,1625.034752,2254.962688,0.0,1608.515584,1560.974848,s,10,70.266875,7.0266874999999995,0.015802312461115554,7.021189208984375,7.04873154296875,7.0518982421875,7.0544316015624995,"[7.05506494140625, 7.04802783203125, 7.0200771484375, 7.04010400390625, 7.03416455078125, 7.01334814453125, 7.0082919921875, 7.02230126953125, 7.0167861328125, 7.008708984375]",tokens/s,8.965817819562917,kWh,8.271868514320838e-05,4.533574606695635e-05,0.00036366391262401533,0.0004917183438341799,tokens/kWh,128122.12680282927,,s,629,71.24009573364258,0.11325929369418533,0.01441184935436087,0.111351806640625,0.11215667266845704,0.11239075622558593,0.2323562939453125,"[0.11357389068603516, 0.11349612426757813, 0.11200812530517579, 0.11255910491943359, 0.1121167984008789, 0.11198560333251953, 0.11219558715820313, 0.11200819396972657, 0.11196825408935547, 0.11275468444824219, 0.11222630310058594, 0.11180441284179687, 0.11134464263916016, 0.11147980499267578, 0.11213721466064454, 0.11178495788574219, 0.11166719818115234, 0.11174092864990234, 0.11117977905273438, 0.1110456314086914, 0.11137638092041016, 0.11204710388183593, 0.11143679809570313, 0.11129449462890625, 0.11121353912353515, 0.1112074203491211, 0.11121561431884766, 0.11156582641601563, 0.11124736022949219, 0.1111910400390625, 0.1112965087890625, 0.11185664367675781, 0.11158732604980469, 0.1118187484741211, 0.11172659301757812, 0.11204096221923829, 0.11242803192138671, 
0.11206655883789063, 0.11170611572265625, 0.1124925765991211, 0.11240444946289062, 0.11224883270263672, 0.11220172882080078, 0.11224473571777344, 0.11245168304443359, 0.11209001922607421, 0.11190579223632813, 0.11188838195800781, 0.11227750396728516, 0.11232051086425782, 0.11256114959716797, 0.11237478637695313, 0.11200921630859376, 0.11215462493896484, 0.1119549789428711, 0.11200611114501953, 0.11207987213134765, 0.11194371032714844, 0.11197846221923828, 0.11205836486816406, 0.11220787048339843, 0.11210034942626954, 0.2345594940185547, 0.1117286376953125, 0.11186585235595703, 0.11204914855957031, 0.11177677154541016, 0.1115832290649414, 0.11116851043701172, 0.1114071044921875, 0.11131391906738282, 0.11137741088867187, 0.11209728240966797, 0.11197542572021485, 0.11190681457519532, 0.11151052856445312, 0.11113471984863281, 0.11245875549316406, 0.11208191680908203, 0.11218540954589844, 0.11216889953613281, 0.11212287902832031, 0.11191302490234376, 0.11190470123291016, 0.11167948913574219, 0.11192729949951172, 0.11203174591064453, 0.11200819396972657, 0.11200409698486329, 0.11204198455810546, 0.11203584289550782, 0.11207577514648437, 0.11203174591064453, 0.11206861114501954, 0.11190476989746094, 0.11196518707275391, 0.11201945495605468, 0.11220070648193359, 0.1121239013671875, 0.11213619232177735, 0.11214745330810547, 0.11210034942626954, 0.11117158508300781, 0.11123609924316406, 0.11129138946533203, 0.11250688171386719, 0.11131903839111328, 0.11113369750976562, 0.11244338989257813, 0.11134259033203125, 0.11169075012207032, 0.11205427551269531, 0.11214745330810547, 0.11191705322265624, 0.11207884979248046, 0.11197849273681641, 0.11211673736572265, 0.11211161804199218, 0.11191500854492188, 0.11222118377685547, 0.11239730834960937, 0.11132415771484375, 0.11126681518554687, 0.1112985610961914, 0.11182182312011718, 0.23266201782226562, 0.11202662658691406, 0.11120025634765625, 0.11109683227539062, 0.11149005126953125, 0.11119513702392578, 0.11112754821777343, 0.11123916625976563, 0.11105177307128906, 0.1111900177001953, 0.11129036712646484, 0.11116851043701172, 0.11114803314208985, 0.11196927642822266, 0.11116134643554687, 0.11125965118408203, 0.11103539276123046, 0.11127398681640625, 0.11154434967041016, 0.11195798492431641, 0.11191193389892579, 0.1111377944946289, 0.1111562271118164, 0.11188735961914062, 0.11112754821777343, 0.11104972839355469, 0.11122278594970703, 0.11126271820068359, 0.11140402984619141, 0.11229183959960938, 0.11202559661865234, 0.11156588745117188, 0.1111817626953125, 0.11196927642822266, 0.11145932769775391, 0.11130470275878906, 0.11111219024658203, 0.11142041778564453, 0.11112140655517579, 0.11115110778808594, 0.11208601379394531, 0.11188428497314454, 0.11182284545898437, 0.11141017913818359, 0.11125043487548827, 0.11123404693603516, 0.11116646575927734, 0.11186994934082031, 0.11114189147949219, 0.11129446411132812, 0.11111833953857422, 0.1112442855834961, 0.11120845031738281, 0.11120435333251953, 0.11193344116210938, 0.11145116424560547, 0.11117052459716797, 0.11132723236083984, 0.11132109069824218, 0.1112074203491211, 0.11136307525634766, 0.11232870483398437, 0.11170201873779297, 0.2324981689453125, 0.11132723236083984, 0.11152384185791016, 0.11143577575683594, 0.11334963226318359, 0.11229798126220703, 0.11186176300048828, 0.11187916564941407, 0.11197337341308594, 0.11203993225097657, 0.11198976135253906, 0.11205836486816406, 0.11139788818359375, 0.11115929412841796, 0.11112754821777343, 0.11136921691894532, 0.11119513702392578, 0.11117056274414062, 0.11119206237792968, 
0.11117465972900391, 0.11126067352294922, 0.11122380828857421, 0.11115827178955077, 0.11134873962402343, 0.1123768310546875, 0.11169586944580077, 0.11113676452636718, 0.11131903839111328, 0.11172249603271485, 0.11171635437011719, 0.11152793884277344, 0.11154022216796874, 0.11121971130371094, 0.11125247955322265, 0.11109174346923828, 0.11109065246582031, 0.111283203125, 0.11159961700439452, 0.11181670379638672, 0.11218841552734375, 0.11207065582275391, 0.11193856048583985, 0.11212083435058594, 0.11199385833740234, 0.11282943725585938, 0.11240140533447265, 0.11203072357177735, 0.11216486358642579, 0.11200102233886719, 0.11202969360351563, 0.112, 0.11213005065917969, 0.11189555358886719, 0.11197647857666015, 0.11237065887451173, 0.11224269104003906, 0.11207270050048829, 0.11202355194091797, 0.11200204467773438, 0.11213520050048828, 0.1117081298828125, 0.11131187438964844, 0.11138969421386719, 0.23215206909179686, 0.11141426849365234, 0.11262156677246093, 0.11177779388427735, 0.11186585235595703, 0.11191295623779297, 0.11129036712646484, 0.11202355194091797, 0.11225702667236329, 0.11216793823242187, 0.11205120086669922, 0.11202252960205078, 0.11183513641357422, 0.11183513641357422, 0.1118361587524414, 0.11228672027587891, 0.11196723175048828, 0.11265023803710937, 0.11201439666748046, 0.11204192352294921, 0.11198668670654296, 0.11204812622070312, 0.1128499526977539, 0.11216585540771484, 0.11173174285888672, 0.11194057464599609, 0.11185971069335937, 0.11190889739990234, 0.11204195404052734, 0.11242700958251953, 0.1118361587524414, 0.11156070709228516, 0.11167436981201172, 0.11125555419921875, 0.11129446411132812, 0.11137535858154297, 0.1112125473022461, 0.11132723236083984, 0.11112652587890624, 0.11121356964111329, 0.11136511993408203, 0.11127808380126954, 0.11119308471679687, 0.11122278594970703, 0.11130265808105469, 0.11126374053955078, 0.11110604858398437, 0.11118386840820313, 0.11117158508300781, 0.11144499206542968, 0.11132316589355469, 0.11124323272705078, 0.11118899536132812, 0.11127705383300782, 0.11124531555175782, 0.11131084442138672, 0.11114189147949219, 0.11251712036132812, 0.11123916625976563, 0.1111562271118164, 0.11102105712890625, 0.111172607421875, 0.11132518768310547, 0.23168409729003905, 0.11126067352294922, 0.11120333099365234, 0.11120333099365234, 0.11132112121582032, 0.11132003021240235, 0.1112279052734375, 0.11117772674560547, 0.11110399627685547, 0.1111541748046875, 0.1111695327758789, 0.11118796539306641, 0.11124940490722657, 0.11112960052490234, 0.11125452423095702, 0.11125759887695312, 0.11122585296630859, 0.11124224090576172, 0.11116646575927734, 0.11114598083496094, 0.11131494140625, 0.11128832244873046, 0.11116134643554687, 0.11131391906738282, 0.11123404693603516, 0.11126681518554687, 0.11123916625976563, 0.1112985610961914, 0.11178495788574219, 0.11137741088867187, 0.11124121856689453, 0.11152281951904297, 0.11121766662597657, 0.11117362976074219, 0.1112125473022461, 0.11121868896484376, 0.111098876953125, 0.1111900177001953, 0.11227442932128906, 0.11120025634765625, 0.11103846740722656, 0.11157708740234375, 0.11146240234375, 0.11182284545898437, 0.11149517059326172, 0.111499267578125, 0.11131289672851563, 0.11149209594726563, 0.11197853088378906, 0.11136713409423828, 0.11115724945068359, 0.11126067352294922, 0.11121151733398438, 0.11126067352294922, 0.11109478759765624, 0.11183001708984375, 0.11150643157958984, 0.11150745391845703, 0.11111833953857422, 0.11116544342041015, 0.11122994995117187, 0.11125452423095702, 0.1113088607788086, 0.23280429077148437, 
0.11111014556884766, 0.11133030700683594, 0.11111119842529296, 0.11112957000732422, 0.11102105712890625, 0.11110809326171875, 0.11106201934814453, 0.11103334045410156, 0.11108870697021485, 0.11110905456542969, 0.11238092803955078, 0.11154227447509765, 0.11123506927490234, 0.11109273529052735, 0.11120333099365234, 0.11103129577636718, 0.11100672149658203, 0.11094937896728516, 0.1110487060546875, 0.11100672149658203, 0.1109534683227539, 0.11100569915771484, 0.11172767639160157, 0.11108550262451172, 0.11122688293457031, 0.11107020568847656, 0.11119721221923828, 0.11113673400878907, 0.11125043487548827, 0.1111562271118164, 0.11116236877441406, 0.1111551971435547, 0.11128012847900391, 0.11118694305419922, 0.11127091217041016, 0.11107123565673828, 0.11122994995117187, 0.11122380828857421, 0.1111551971435547, 0.11100466918945312, 0.11141836547851562, 0.11136511993408203, 0.11123814392089844, 0.11123302459716797, 0.11117670440673828, 0.11112960052490234, 0.11146444702148438, 0.11135078430175781, 0.11130470275878906, 0.11135897827148437, 0.11127808380126954, 0.11142451477050781, 0.11136716461181641, 0.1120901107788086, 0.11140300750732422, 0.11124326324462891, 0.11130985260009765, 0.111257568359375, 0.1111551971435547, 0.1111756820678711, 0.11127295684814453, 0.11109580993652343, 0.23243571472167968, 0.11107839965820313, 0.11138361358642578, 0.11138758087158203, 0.111172607421875, 0.11116441345214843, 0.11112754821777343, 0.11145728302001953, 0.11120230102539062, 0.11218739318847656, 0.11158425903320313, 0.11142348480224609, 0.11124326324462891, 0.11134566497802735, 0.11134770965576171, 0.11127603149414063, 0.11175628662109376, 0.11245772552490234, 0.11182899475097656, 0.11139379119873047, 0.11116031646728515, 0.11131187438964844, 0.11135897827148437, 0.11138253021240234, 0.11129446411132812, 0.11140914916992188, 0.1112995834350586, 0.11167334747314453, 0.11146444702148438, 0.11127398681640625, 0.1114286117553711, 0.11160883331298828, 0.111388671875, 0.11126783752441406, 0.111283203125, 0.11141529846191406, 0.11138355255126953, 0.11169382476806641, 0.111425537109375, 0.11148297882080078, 0.11134454345703125, 0.11160371398925781, 0.11167334747314453, 0.11133235168457031, 0.11129036712646484, 0.11148089599609375, 0.11127085113525391, 0.11113471984863281, 0.11119821166992187, 0.11146444702148438, 0.1114419174194336, 0.11144499206542968, 0.11137741088867187, 0.11137126159667969, 0.11174604797363281, 0.11149619293212891, 0.11148902130126953, 0.1114419174194336, 0.1115525131225586, 0.11180646514892578, 0.11210854339599609, 0.11200819396972657, 0.11173580932617187, 0.2335253143310547, 0.11135481262207031, 0.11140300750732422, 0.11150540924072265, 0.11128627014160156, 0.11110707092285156, 0.11116851043701172, 0.11114393615722656, 0.1112995834350586, 0.11127808380126954, 0.11109273529052735, 0.11120953369140625, 0.1113087387084961, 0.11121971130371094, 0.11118284606933594, 0.11124736022949219, 0.1122508773803711, 0.11217715454101562, 0.1113733139038086, 0.11129344177246094, 0.11147058868408204, 0.11150335693359376, 0.111351806640625, 0.11135897827148437, 0.11148492431640625, 0.11133952331542969, 0.11130675506591797, 0.11139481353759766, 0.11144703674316406, 0.11154124450683593, 0.11132825469970703, 0.11111219024658203, 0.11122073364257813, 0.11118386840820313, 0.11125971221923828, 0.1125447006225586, 0.11146546936035157, 0.11134361267089844, 0.11123097229003906, 0.11145728302001953, 0.11127091217041016, 0.1112074203491211, 0.1114224624633789, 0.11159859466552734, 0.11146854400634766, 0.11118284606933594, 
0.11170816040039062, 0.11130777740478516, 0.11138662719726562, 0.11138253021240234, 0.11114701080322266, 0.11128729248046874, 0.11132109069824218, 0.11136102294921875, 0.11118284606933594, 0.11117881774902344, 0.11109677124023437, 0.11118796539306641, 0.11113267517089843, 0.11119411468505859, 0.111246337890625, 0.1112279052734375, 0.11181465911865235, 0.23307980346679688, 0.11142041778564453, 0.1113538589477539, 0.11121561431884766, 0.11131903839111328, 0.11121561431884766, 0.11105689239501954, 0.111246337890625, 0.11114291381835938, 0.1110835189819336, 0.1110241928100586, 0.11124934387207032, 0.11121561431884766, 0.11143475341796875, 0.11115007781982422, 0.11116031646728515, 0.11110912322998047, 0.11113881683349609, 0.11112754821777343, 0.11146137237548828, 0.11116646575927734, 0.1111173095703125, 0.11114291381835938, 0.11114495849609375, 0.11101900482177735, 0.11122176361083984, 0.11194265747070313, 0.11108147430419922, 0.11138662719726562, 0.11130368041992188, 0.11116748809814453, 0.11115110778808594, 0.11116649627685547, 0.11112751770019531, 0.1111695327758789, 0.11108454132080078, 0.11120127868652344, 0.11118796539306641, 0.11125555419921875, 0.11124018859863281, 0.11112140655517579, 0.11123712158203125, 0.11195391845703125, 0.1116231689453125, 0.111388671875, 0.11123404693603516, 0.11115110778808594, 0.11121459197998047, 0.1111562271118164, 0.11116748809814453, 0.11117772674560547, 0.11126374053955078, 0.1111910400390625, 0.11120333099365234, 0.11108454132080078, 0.11108761596679688, 0.1111203842163086, 0.1111357421875, 0.11103231811523437, 0.11127603149414063, 0.11121868896484376, 0.11217817687988281, 0.11138457489013671]",tokens/s,8.829297511779728,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-13b,facebook/opt-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,3081.781248,9521.594368,0.0,8875.147264,8264.141824,s,10,10.63840625,1.0638406249999999,0.0011581823391520636,1.0637650756835937,1.0652460571289062,1.0654749572753905,1.0656580773925781,"[1.0649481201171875, 1.065703857421875, 1.0634771728515624, 1.0627923583984376, 1.063096435546875, 1.0621585693359374, 1.064052978515625, 1.0644849853515626, 1.0651951904296875, 1.06249658203125]",tokens/s,240.63754850497463,kWh,1.2549749844604067e-05,6.874677577252441e-06,5.877399146358986e-05,7.819841888544636e-05,tokens/kWh,3273723.479946787,MB,3081.781248,9521.594368,0.0,8875.147264,8556.643328,s,10,631.86266796875,63.186266796874996,0.007118498406389631,63.1850546875,63.19375234375,63.198333203125,63.201997890625,"[63.192734375, 63.2029140625, 63.18565625, 63.17730078125, 63.18101953125, 63.1805546875, 63.18579296875, 63.18146875, 63.1907734375, 63.184453125]",tokens/s,0.9970520999844192,kWh,0.0007460630791551536,0.0004089103833046283,0.003496975492022614,0.004651948954482396,tokens/kWh,13542.710940389017,,s,629,640.4335947265614,1.0181774160994632,0.12635276340148643,1.0029219970703125,1.0034655395507812,1.0036467651367187,2.0656750390625,"[1.00284619140625, 1.002883056640625, 
1.0025042114257812, 1.0028574829101562, 1.0028175659179688, 1.002925048828125, 1.0030203247070313, 1.0030990600585938, 1.0027622680664063, 1.0032742309570313, 1.0026455078125, 1.003062255859375, 1.0025420532226563, 1.0029680786132813, 1.0027857666015625, 1.00307763671875, 1.0029732055664062, 1.0030858154296876, 1.002630126953125, 1.0030980834960936, 1.0028267822265624, 1.003293701171875, 1.0026495971679688, 1.0032005004882814, 1.0028339233398438, 1.0035537719726562, 1.0033336181640624, 1.0031492919921876, 1.0029854736328125, 1.0029179077148438, 1.0027100219726564, 1.0030469360351562, 1.0027202758789062, 1.0029025268554688, 1.002692626953125, 1.003177978515625, 1.0032864990234376, 1.0032291870117187, 1.0031237182617188, 1.0033694458007814, 1.0031052856445313, 1.0034606323242188, 1.0029014892578125, 1.0031063232421875, 1.002977294921875, 1.0030796508789062, 1.0032691040039063, 1.003052001953125, 1.003093994140625, 1.0031728515625, 1.0026219482421874, 1.003230224609375, 1.0031892700195313, 1.003472900390625, 1.0031318969726561, 1.0034933471679688, 1.0038251342773437, 1.0035701904296874, 1.0035916748046876, 1.0035220336914064, 1.0029598999023437, 1.0031124267578124, 2.068518798828125, 1.0033325805664062, 1.0031472778320312, 1.0031912841796875, 1.0029844360351563, 1.0031206665039063, 1.0034401245117188, 1.0029373168945312, 1.0032005004882814, 1.00276123046875, 1.003198486328125, 1.0033233642578125, 1.0032588500976563, 1.0031349487304688, 1.0031708374023438, 1.0031646728515624, 1.0034074096679688, 1.0032445068359375, 1.0034769897460938, 1.0028963623046876, 1.0031943969726562, 1.0029393920898437, 1.0032639770507812, 1.0037217407226562, 1.0034319458007812, 1.0026045532226562, 1.003093017578125, 1.0026946411132813, 1.0035282592773438, 1.0035670166015624, 1.0035159301757812, 1.0033766479492188, 1.0034708251953126, 1.0032630004882812, 1.0034298706054687, 1.002693603515625, 1.0028206176757812, 1.0027888793945312, 1.0029660034179688, 1.0028257446289062, 1.0030294799804687, 1.00284619140625, 1.003236328125, 1.0038517456054687, 1.0034739379882813, 1.0031769409179687, 1.003452392578125, 1.0032343139648439, 1.0034002075195312, 1.0033592529296875, 1.0034892578125, 1.003430908203125, 1.0036060180664061, 1.0037442626953126, 1.0036817626953125, 1.003325439453125, 1.0032271118164062, 1.0029475708007813, 1.0032077026367188, 1.0028257446289062, 1.0033950805664062, 1.0032373657226563, 1.0033449096679687, 2.06620166015625, 1.003240478515625, 1.0030346069335938, 1.0028810424804688, 1.0028892211914062, 1.0025840454101562, 1.0026762084960938, 1.0028451538085938, 1.0030079956054687, 1.0030458984375, 1.0029598999023437, 1.0025256958007813, 1.002809326171875, 1.0025021362304687, 1.002545166015625, 1.002588134765625, 1.0025830688476562, 1.0028103637695311, 1.002876953125, 1.0027571411132812, 1.0027847900390625, 1.0027694091796875, 1.0029219970703125, 1.0027468872070313, 1.0030151977539064, 1.002692626953125, 1.0029998168945313, 1.0028185424804688, 1.002977294921875, 1.0030653686523439, 1.0028308715820313, 1.0027254028320312, 1.0027396850585937, 1.002481689453125, 1.0026741943359374, 1.002397705078125, 1.0026148071289063, 1.0031001586914063, 1.003051025390625, 1.0032445678710937, 1.0031226806640625, 1.00276123046875, 1.0029619140625, 1.0027110595703126, 1.00299365234375, 1.0028328857421875, 1.0028206176757812, 1.0037903442382812, 1.0038609619140626, 1.0040064086914062, 1.0035978393554688, 1.0031810302734374, 1.0034503784179687, 1.0034176025390624, 1.0035722045898436, 1.0029711303710938, 1.0032793579101562, 
1.003167724609375, 1.0029445190429687, 1.0031759643554687, 1.0028656616210938, 1.002893310546875, 1.0028052368164062, 2.0654345703125, 1.0026424560546876, 1.0026322021484375, 1.0025789184570313, 1.0027899169921874, 1.0024918823242188, 1.0032875366210938, 1.002587158203125, 1.002587158203125, 1.0025154418945312, 1.0026045532226562, 1.0023638916015625, 1.002629150390625, 1.0024959716796875, 1.0028257446289062, 1.0028626098632814, 1.0027550659179687, 1.0028287963867188, 1.0026536865234374, 1.0024857788085937, 1.0027151489257813, 1.0024734497070313, 1.0031738891601563, 1.0028011474609375, 1.003345947265625, 1.002503173828125, 1.0029957275390624, 1.0027591552734374, 1.003062255859375, 1.003240478515625, 1.0029967651367186, 1.002982421875, 1.0035916748046876, 1.0024970092773438, 1.0031472778320312, 1.0028124389648438, 1.0027591552734374, 1.0026690673828125, 1.0027110595703126, 1.0026762084960938, 1.0028493041992188, 1.0025471801757813, 1.0032220458984376, 1.0027745361328124, 1.0030745849609375, 1.0030172119140626, 1.0028216552734375, 1.002661865234375, 1.0027734985351562, 1.00288818359375, 1.0029906005859375, 1.00273046875, 1.0033080444335938, 1.0029434814453124, 1.0028789672851564, 1.0029700927734375, 1.0029117431640624, 1.0027509765625, 1.002767333984375, 1.0027171630859375, 1.0029578247070312, 1.002545166015625, 1.003062255859375, 2.0657685546875, 1.0028359375, 1.0028124389648438, 1.0024847412109374, 1.0028124389648438, 1.0025441284179688, 1.0027601928710939, 1.0023495483398437, 1.0028533935546875, 1.0025604858398438, 1.0030325927734376, 1.0025574340820314, 1.0029946899414062, 1.0025062255859376, 1.0027683715820312, 1.002640380859375, 1.0027284545898438, 1.0027683715820312, 1.0030612182617187, 1.002598388671875, 1.0030366821289063, 1.0028635864257813, 1.0032056274414063, 1.0027100219726564, 1.0028585205078124, 1.0025328369140625, 1.0029660034179688, 1.0023075561523438, 1.0032752685546875, 1.002946533203125, 1.0032271118164062, 1.0027387084960937, 1.0030264282226562, 1.0025748291015626, 1.0029188842773438, 1.0027284545898438, 1.002767333984375, 1.0028052368164062, 1.0030530395507813, 1.0026967163085938, 1.0031349487304688, 1.0030233764648437, 1.0033059692382813, 1.002514404296875, 1.0027438354492189, 1.0027816772460938, 1.0031032104492188, 1.0027970581054688, 1.0033837890625, 1.0027315063476563, 1.0034063110351563, 1.0029496459960938, 1.0030530395507813, 1.0026383056640624, 1.00305615234375, 1.0027919311523437, 1.0027427978515624, 1.0032557983398438, 1.0037903442382812, 1.0031646728515624, 1.003399169921875, 1.0027448120117188, 1.0029649658203126, 2.064649169921875, 1.0027888793945312, 1.0026015014648437, 1.0022328491210937, 1.0030059814453125, 1.0026045532226562, 1.0029281005859374, 1.0032117919921875, 1.0038660888671875, 1.0030458984375, 1.0028523559570313, 1.00279296875, 1.0029219970703125, 1.00295068359375, 1.0030786743164062, 1.00269775390625, 1.0029230346679687, 1.0029240112304687, 1.0026373291015624, 1.0023751220703125, 1.0028328857421875, 1.0023956298828125, 1.002513427734375, 1.002144775390625, 1.003087890625, 1.0024324951171875, 1.0031769409179687, 1.0026843872070312, 1.0027018432617187, 1.0026710815429687, 1.0026076049804689, 1.0026281127929688, 1.00263525390625, 1.0025758666992188, 1.00335107421875, 1.0025952758789063, 1.0027919311523437, 1.0030172119140626, 1.0029312133789063, 1.0026270751953126, 1.0026127319335938, 1.0024099731445313, 1.0028635864257813, 1.0025287475585938, 1.003124755859375, 1.0027868041992187, 1.0029752197265625, 1.0032691040039063, 1.0028994750976563, 
1.0029813842773438, 1.0031022338867188, 1.0025799560546875, 1.0030069580078125, 1.0030786743164062, 1.0034902954101563, 1.00322509765625, 1.0034298706054687, 1.0037340087890625, 1.0030960693359374, 1.00282470703125, 1.0031349487304688, 1.0028359375, 1.0032772827148437, 2.06746826171875, 1.0032711791992188, 1.00282470703125, 1.0029578247070312, 1.0026793212890626, 1.0024990844726562, 1.0027325439453125, 1.0022778930664062, 1.0023464965820312, 1.0023444213867188, 1.0030346069335938, 1.0042449951171875, 1.0037616577148438, 1.0032691040039063, 1.0031185913085938, 1.0024017944335937, 1.0026875, 1.0032916259765625, 1.0031452026367187, 1.0032855224609376, 1.0035599365234376, 1.0035138549804687, 1.0038927612304687, 1.0032435302734375, 1.002841064453125, 1.0024775390625, 1.0027438354492189, 1.0022512817382812, 1.002450927734375, 1.00248779296875, 1.002692626953125, 1.0029946899414062, 1.0030489501953126, 1.0032496337890624, 1.0034002075195312, 1.003052001953125, 1.0032271118164062, 1.0028707885742187, 1.0030796508789062, 1.0030028686523438, 1.0040872802734375, 1.00299365234375, 1.0028626098632814, 1.00265576171875, 1.002756103515625, 1.002808349609375, 1.0027335815429688, 1.0026813354492188, 1.00265673828125, 1.002919921875, 1.0032476196289062, 1.0029752197265625, 1.0029025268554688, 1.00297216796875, 1.0027991333007813, 1.0026178588867187, 1.002767333984375, 1.0026639404296875, 1.0027540283203125, 1.0026741943359374, 1.0029127807617189, 1.002988525390625, 1.0027018432617187, 2.066872314453125, 1.0026639404296875, 1.00259228515625, 1.0022307739257812, 1.0025277709960938, 1.0030377197265625, 1.0026045532226562, 1.002482666015625, 1.0029168701171876, 1.002771484375, 1.0028124389648438, 1.0024642333984375, 1.0027807006835938, 1.0025420532226563, 1.0025379638671874, 1.002556396484375, 1.002534912109375, 1.0028687133789063, 1.002840087890625, 1.0028328857421875, 1.0028451538085938, 1.0025236206054688, 1.0027632446289063, 1.0025379638671874, 1.0027888793945312, 1.0026751708984376, 1.003261962890625, 1.0027161865234375, 1.0028973999023438, 1.0030325927734376, 1.00279296875, 1.0027479248046876, 1.0025471801757813, 1.0023690185546874, 1.0029291381835939, 1.0029486083984376, 1.0027970581054688, 1.0028840942382813, 1.0029475708007813, 1.0030294799804687, 1.0031657104492187, 1.0029014892578125, 1.003304931640625, 1.0028472290039063, 1.0032578735351563, 1.0034647216796875, 1.0028277587890626, 1.0030632934570312, 1.0030980834960936, 1.00314111328125, 1.0028748779296874, 1.002660888671875, 1.0028431396484374, 1.0028472290039063, 1.0029691162109375, 1.0030172119140626, 1.002925048828125, 1.0033325805664062, 1.0035189819335937, 1.0036541137695312, 1.004179443359375, 1.0036951293945313, 1.002919921875, 2.06862841796875, 1.0036357421875, 1.0038538208007812, 1.0032230224609375, 1.0033796997070312, 1.002618896484375, 1.002708984375, 1.0022072143554688, 1.0026813354492188, 1.0022891235351563, 1.002555419921875, 1.0025963745117188, 1.00295166015625, 1.0026690673828125, 1.002914794921875, 1.003019287109375, 1.0031646728515624, 1.002956787109375, 1.0028861694335938, 1.0026751708984376, 1.003303955078125, 1.0028124389648438, 1.0030386962890625, 1.0028052368164062, 1.00309912109375, 1.0027315063476563, 1.0034688110351562, 1.0020556640625, 1.003325439453125, 1.0033530883789064, 1.003283447265625, 1.0034749145507813, 1.0035108032226563, 1.0035435791015626, 1.0037903442382812, 1.0029168701171876, 1.0031943969726562, 1.0024591064453126, 1.0026526489257812, 1.0026332397460938, 1.0027837524414063, 1.0034381103515626, 
1.0032527465820313, 1.0027008056640625, 1.0030745849609375, 1.002945556640625, 1.0031943969726562, 1.0027479248046876, 1.0030663452148438, 1.002672119140625, 1.0029179077148438, 1.0026751708984376, 1.002841064453125, 1.0025738525390624, 1.0032977905273437, 1.0027786254882813, 1.0031134643554687, 1.00257177734375, 1.0031124267578124, 1.0026741943359374, 1.0032947387695312, 1.0030386962890625, 1.0032332763671874, 2.068487060546875, 1.0029629516601561, 1.0030684204101563, 1.0028482666015626, 1.0030899047851562, 1.0025728149414062, 1.0029260864257812, 1.0024949951171875, 1.0026229858398437, 1.0027960205078126, 1.003514892578125, 1.0023598022460938, 1.0027949829101563, 1.0025420532226563, 1.0026260375976563, 1.00259228515625, 1.0028103637695311, 1.002603515625, 1.00250830078125, 1.002608642578125, 1.0032783203125, 1.0025861206054687, 1.0032824096679687, 1.0027950439453126, 1.0030663452148438, 1.0023987426757812, 1.0027417602539062, 1.0023075561523438, 1.0025850830078125, 1.0028851318359375, 1.003109375, 1.0031339721679688, 1.003430908203125, 1.0028308715820313, 1.00331005859375, 1.0026096801757813, 1.0028687133789063, 1.0031749267578125, 1.003378662109375, 1.0034810791015625, 1.0036776733398438, 1.0027991333007813, 1.0026577758789061, 1.0023618774414063, 1.002555419921875, 1.0025308227539063, 1.00274072265625, 1.0025973510742188, 1.0027908935546874, 1.003040771484375, 1.0038589477539062, 1.002471435546875, 1.003072509765625, 1.0029865112304688, 1.0029404296875, 1.0026116943359376, 1.003293701171875, 1.00347802734375, 1.0035588989257813, 1.00347802734375, 1.0037841796875, 1.0034053344726563, 1.0032077026367188]",tokens/s,0.9821471034300691,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,8,8,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/8/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a31c2-7782bbad447632ee7870b0db;c22a3658-aa76-435d-ad6d-40123aebcfbb) Repository Not Found for url: https://huggingface.co/8/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 8 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-72B,Qwen/Qwen-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights 
self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,databricks/dbrx-base,databricks/dbrx-base,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cbe77-3a2359041336c8df42d16438;ecf47d20-b423-4e90-a6c3-ec5b8434ffc4) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like databricks/dbrx-base is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,microsoft/rho-math-1b-v0.1,microsoft/rho-math-1b-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1535.045632,1705.508864,0.0,1059.06176,901.251072,s,10,1.2508666152954102,0.125086661529541,0.0018104018213424833,0.12444574356079102,0.126299910736084,0.12811838493347166,0.12957316429138183,"[0.12993685913085937, 0.12397126770019531, 0.12358041381835938, 0.124216064453125, 0.1243848648071289, 0.12450662231445313, 0.12540995025634766, 0.1256360321044922, 0.1233287353515625, 0.12589580535888673]",tokens/s,2046.5811211976581,kWh,1.4727642404526186e-06,8.07004418028454e-07,5.46138982864209e-06,7.741158487123162e-06,tokens/kWh,33069985.639208503,MB,1535.045632,1705.508864,0.0,1059.06176,931.976704,s,10,74.43358642578124,7.443358642578124,0.00946750701010035,7.4411630859375,7.453821142578125,7.459902514648437,7.464767612304687,"[7.4524697265625, 7.437251953125, 7.43481591796875, 7.44107763671875, 7.44202734375, 7.4486748046875, 7.46598388671875, 7.43750244140625, 7.4325341796875, 7.44124853515625]",tokens/s,8.463921063755025,kWh,8.849523470459816e-05,4.850131187912767e-05,0.0003201225928605506,0.0004571191394442764,tokens/kWh,137819.65042327833,,s,629,75.43097544097903,0.11992205952460892,0.01485084367631047,0.11795148468017579,0.11891920471191407,0.11957084197998047,0.24228786743164063,"[0.1226967010498047, 0.12151910400390625, 0.12056473541259766, 0.11847885131835938, 0.11765760040283203, 0.1177681884765625, 0.11789107513427734, 0.11814604949951171, 0.11932160186767578, 0.11832012939453125, 0.1177927703857422, 0.11863961791992188, 0.117970947265625, 0.11791155242919922, 0.1177927703857422, 0.11780095672607421, 0.11787673950195313, 0.11762687683105469, 0.11790541076660156, 0.11779481506347657, 0.117823486328125, 0.1177896957397461, 0.11771392059326172, 0.11764736175537109, 0.1177733154296875, 0.11770368194580078, 0.11773951721191406, 0.118076416015625, 0.11775590515136719, 0.11770674896240234, 0.11798528289794921, 0.1176780776977539, 0.117775390625, 0.1183753890991211, 0.11995954895019531, 0.11985305786132812, 0.1180579833984375, 0.11781836700439453, 0.11756646728515625, 0.11805900573730468, 0.11770265960693359, 0.11834572601318359, 0.11778355407714844, 0.11779481506347657, 0.11785932922363282, 0.11764838409423828, 0.11807437133789063, 0.11778662109375, 0.11772621154785157, 0.11770982360839843, 0.11952230072021484, 0.11834982299804687, 0.11924479675292969, 0.1183662109375, 0.11791871643066407, 0.11783168029785156, 0.11791667175292969, 0.11793510437011719, 0.11775590515136719, 0.11848908996582032, 0.11845222473144532, 0.11795967864990234, 0.24229991149902344, 0.11855974578857421, 0.11810508728027344, 0.1181112289428711, 0.11786239624023437, 0.11781222534179688, 0.11777536010742187, 0.11783475494384765, 0.1178050537109375, 0.11783270263671874, 0.11801292419433594, 0.11765145874023437, 0.11773747253417968, 0.11818905639648437, 
0.11795558166503907, 0.11861913299560548, 0.11793202972412109, 0.11773951721191406, 0.11792384338378906, 0.11784806060791016, 0.11804672241210938, 0.11847372436523437, 0.11889459228515625, 0.11787366485595703, 0.11765760040283203, 0.11804364776611329, 0.1181839370727539, 0.11801395416259766, 0.11794841766357422, 0.11845529937744141, 0.11901849365234375, 0.11806412506103516, 0.11792793273925781, 0.1178419189453125, 0.11779174041748047, 0.1181470718383789, 0.11795148468017579, 0.11775794982910157, 0.11785420989990235, 0.11785318756103516, 0.11791769409179688, 0.1176842269897461, 0.11780198669433593, 0.11803135681152344, 0.11790847778320312, 0.11800678253173828, 0.11986029052734375, 0.11766470336914063, 0.11762483215332031, 0.11772927856445313, 0.11784601593017578, 0.11774259185791015, 0.1181296615600586, 0.11775794982910157, 0.11769548797607422, 0.11781222534179688, 0.11974041748046875, 0.11866316986083984, 0.11849625396728515, 0.11808665466308593, 0.11798220825195313, 0.11786956787109375, 0.11778559875488281, 0.2420991973876953, 0.11768319702148437, 0.11785318756103516, 0.1178757095336914, 0.11764019012451171, 0.11813990020751954, 0.11782553863525391, 0.11776204681396485, 0.11799756622314453, 0.11830271911621094, 0.118508544921875, 0.11805184173583984, 0.11785523223876954, 0.11794841766357422, 0.11769241333007813, 0.11777126312255859, 0.11797299194335938, 0.11789926147460937, 0.1177528305053711, 0.11770880126953125, 0.11770674896240234, 0.11790438079833984, 0.1180794906616211, 0.11948953247070312, 0.11772211456298828, 0.11786239624023437, 0.11781734466552735, 0.11803340911865234, 0.11814604949951171, 0.11789823913574218, 0.11793920135498047, 0.11805184173583984, 0.11791667175292969, 0.11798834991455077, 0.11809382629394531, 0.1178818588256836, 0.11811328125, 0.11793817901611328, 0.11791053009033203, 0.11808255767822265, 0.11819929504394532, 0.11768319702148437, 0.11867545318603516, 0.11808870697021484, 0.11771084594726562, 0.11781324768066406, 0.11782860565185548, 0.11991654205322265, 0.11770674896240234, 0.117823486328125, 0.11866214752197266, 0.11856486511230468, 0.11797299194335938, 0.11779686737060546, 0.11797913360595703, 0.1178941421508789, 0.11789619445800781, 0.11790847778320312, 0.11780812835693359, 0.11815628814697265, 0.11777945709228516, 0.11793202972412109, 0.11774361419677734, 0.24225689697265626, 0.11883827209472657, 0.11843174743652343, 0.11770162963867188, 0.11769651031494141, 0.11803648376464844, 0.11810921478271484, 0.11791152191162109, 0.11775180816650391, 0.1179842529296875, 0.11786649322509765, 0.11789823913574218, 0.11888333129882812, 0.11870105743408203, 0.11781120300292969, 0.11810918426513672, 0.11798937225341796, 0.11783577728271484, 0.11785113525390625, 0.11797196960449219, 0.11792281341552735, 0.11785011291503907, 0.11803040313720703, 0.11771692657470703, 0.117897216796875, 0.11816140747070313, 0.11904307556152344, 0.11791974639892579, 0.11789311981201171, 0.11902464294433594, 0.11833856201171875, 0.12005171203613281, 0.11821363067626953, 0.11774566650390625, 0.11858534240722657, 0.11854847717285157, 0.11799961853027344, 0.11858636474609376, 0.11794432067871094, 0.11810918426513672, 0.11841535949707031, 0.11808563232421875, 0.11770777893066406, 0.11799142456054687, 0.11910041809082031, 0.11799552154541015, 0.11794534301757813, 0.11774361419677734, 0.11787366485595703, 0.11796173095703125, 0.11805900573730468, 0.11948339080810547, 0.1178265609741211, 0.11779071807861329, 0.11765350341796875, 0.11782860565185548, 0.11791667175292969, 0.11784909057617188, 
0.11769856262207032, 0.11790643310546875, 0.11776000213623047, 0.11783065795898437, 0.11767705535888671, 0.2425384979248047, 0.11768013000488281, 0.11795558166503907, 0.11765964508056641, 0.11810201263427735, 0.11820953369140624, 0.1178265609741211, 0.11962060546875, 0.11832319641113281, 0.11826278686523438, 0.11800371551513672, 0.11801292419433594, 0.11777126312255859, 0.11808979034423828, 0.1176882553100586, 0.11781632232666016, 0.11816345977783203, 0.11939839935302735, 0.11851058959960938, 0.11840716552734375, 0.11813273620605469, 0.11791053009033203, 0.1183477783203125, 0.11780403137207031, 0.11925299072265624, 0.11806310272216797, 0.11794226837158203, 0.11788288116455078, 0.11790847778320312, 0.11780812835693359, 0.11756851196289063, 0.11803648376464844, 0.11778457641601563, 0.11857817840576172, 0.11874816131591796, 0.11857202911376953, 0.11783372497558593, 0.11794432067871094, 0.11835801696777344, 0.11801395416259766, 0.11805388641357421, 0.11840716552734375, 0.11800678253173828, 0.11772723388671875, 0.11773849487304687, 0.11781427001953125, 0.11807030487060546, 0.11788591766357422, 0.1177896957397461, 0.11810610961914063, 0.11781529235839844, 0.118150146484375, 0.11891506958007812, 0.11792588806152343, 0.1180979232788086, 0.11776000213623047, 0.11800064086914062, 0.12033126068115234, 0.11792998504638671, 0.1180200958251953, 0.1176995849609375, 0.11785523223876954, 0.11783270263671874, 0.24298291015625, 0.1177528305053711, 0.11772723388671875, 0.11784909057617188, 0.11783782196044922, 0.11776409912109374, 0.1176258544921875, 0.11822592163085938, 0.11832115173339844, 0.11806719970703125, 0.11768729400634766, 0.11852082824707032, 0.11796377563476562, 0.11766067504882813, 0.11783372497558593, 0.11778457641601563, 0.11765452575683594, 0.11768319702148437, 0.11780812835693359, 0.11886284637451172, 0.11925299072265624, 0.11837849426269531, 0.11823616027832032, 0.11786137390136718, 0.11829452514648438, 0.11799961853027344, 0.11810099029541016, 0.11921817779541016, 0.11947929382324218, 0.11808767700195312, 0.11842867279052735, 0.11818086242675781, 0.11845017242431641, 0.11814604949951171, 0.11810406494140625, 0.11812249755859375, 0.118150146484375, 0.11768627166748047, 0.12074086761474609, 0.11790029144287109, 0.1178757095336914, 0.11833753967285156, 0.11824742126464843, 0.1190297622680664, 0.11828121948242187, 0.11830067443847657, 0.11778559875488281, 0.1179842529296875, 0.11804879760742187, 0.11841123199462891, 0.11772108459472656, 0.11835801696777344, 0.11804057312011719, 0.11791462707519532, 0.11991449737548829, 0.1183846435546875, 0.11778867340087891, 0.11815936279296875, 0.11795558166503907, 0.11824639892578125, 0.11795558166503907, 0.11841535949707031, 0.11960320281982421, 0.2442239990234375, 0.11785625457763672, 0.11795455932617188, 0.1188362274169922, 0.1178050537109375, 0.11798834991455077, 0.11827916717529297, 0.11871334075927735, 0.11938304138183593, 0.1188136978149414, 0.11833753967285156, 0.11944652557373046, 0.11828736114501953, 0.1178757095336914, 0.1189969940185547, 0.11945779418945313, 0.11926118469238281, 0.11858534240722657, 0.1181296615600586, 0.1187041244506836, 0.11901542663574219, 0.11876454162597656, 0.11830989074707031, 0.12047666931152344, 0.11891817474365235, 0.11892630767822265, 0.11849318695068359, 0.11825049591064453, 0.1181839370727539, 0.11833241271972657, 0.11835699462890625, 0.11903282928466796, 0.11840614318847656, 0.11857100677490234, 0.11871027374267579, 0.11851980590820313, 0.11851673889160157, 0.11861606597900391, 0.11881267547607421, 
0.1186324462890625, 0.12009369659423828, 0.11938201904296875, 0.11857305908203125, 0.11787980651855469, 0.11817676544189454, 0.11832524871826172, 0.11826790618896485, 0.11835801696777344, 0.120447998046875, 0.11851776123046875, 0.11879526519775391, 0.11777536010742187, 0.11783372497558593, 0.11804057312011719, 0.11810304260253907, 0.11771186828613281, 0.11789823913574218, 0.11804160308837891, 0.11768934631347656, 0.11759923553466797, 0.11778559875488281, 0.11754393768310546, 0.11777740478515625, 0.24373759460449218, 0.11779174041748047, 0.1175920639038086, 0.11776306915283204, 0.1174999008178711, 0.11786547088623046, 0.11793408203125, 0.11796479797363281, 0.11785113525390625, 0.11759923553466797, 0.12032717132568359, 0.11938098907470703, 0.11796991729736328, 0.11770674896240234, 0.11779891204833984, 0.11797913360595703, 0.11979264068603515, 0.11813990020751954, 0.11899801635742188, 0.11855462646484374, 0.11808153533935548, 0.1180231704711914, 0.11797913360595703, 0.11860889434814453, 0.11787366485595703, 0.11785113525390625, 0.11788492584228516, 0.11816242980957031, 0.11869491577148437, 0.1198888931274414, 0.11808051300048829, 0.11794739532470704, 0.11787468719482422, 0.11797196960449219, 0.11788288116455078, 0.11782246398925782, 0.11781017303466797, 0.11793612670898437, 0.11752345275878906, 0.11755213165283203, 0.11759308624267578, 0.1179535369873047, 0.11776102447509766, 0.11792486572265624, 0.11769344329833985, 0.11778150177001953, 0.11779993438720703, 0.11772313690185547, 0.11796889495849609, 0.11796275329589843, 0.11774463653564453, 0.11760639953613282, 0.11799858856201172, 0.11777126312255859, 0.11769139099121094, 0.1178757095336914, 0.11771699523925781, 0.1177733154296875, 0.11792588806152343, 0.11762687683105469, 0.11798834991455077, 0.11797913360595703, 0.11765555572509766, 0.24334541320800782, 0.11772621154785157, 0.11778457641601563, 0.11770777893066406, 0.11806208038330078, 0.11857817840576172, 0.11794944000244141, 0.11780403137207031, 0.1176094741821289, 0.11765350341796875, 0.11788390350341797, 0.11832118225097656, 0.1179985580444336, 0.11776409912109374, 0.11778253173828125, 0.11771392059326172, 0.11762995147705078, 0.11899801635742188, 0.11768831634521484, 0.11782860565185548, 0.1177343978881836, 0.11791974639892579, 0.11767910766601562, 0.1176975326538086, 0.11781222534179688, 0.1176842269897461, 0.11812044525146484, 0.11786444854736328, 0.11779481506347657, 0.11779280090332031, 0.11815216064453125, 0.11984896087646485, 0.11806412506103516, 0.11796889495849609, 0.11819007873535156, 0.11776102447509766, 0.11791974639892579, 0.11807334136962891, 0.1183795166015625, 0.11832832336425782, 0.11772211456298828, 0.11752243041992187, 0.1177364501953125, 0.117718017578125, 0.11764736175537109, 0.11770265960693359, 0.11787673950195313, 0.11846963500976562, 0.11805900573730468, 0.11839180755615235, 0.11926834869384766, 0.11823616027832032, 0.11819110107421875, 0.11771186828613281, 0.11779891204833984, 0.11797606658935547, 0.11803033447265625, 0.11784806060791016, 0.11762380981445313, 0.11781938934326172, 0.11753062438964844, 0.1183846435546875, 0.11802214050292968, 0.24430592346191407, 0.11781120300292969, 0.11784294128417969, 0.11759410858154297, 0.11747020721435547, 0.11764940643310547, 0.11776000213623047, 0.1176657943725586, 0.11789209747314452, 0.11810304260253907, 0.11752448272705078, 0.11766681671142579, 0.11787366485595703, 0.11762278747558594, 0.11758796691894531, 0.11786854553222656, 0.11814297485351563, 0.11897650909423828, 0.11819315338134766, 0.11778355407714844, 
0.11794944000244141, 0.1182208023071289, 0.1181665267944336, 0.11872051239013671, 0.11848806762695313, 0.11986124420166015, 0.12019916534423829, 0.11816754913330078, 0.1177927703857422, 0.11773849487304687, 0.11777740478515625, 0.11786956787109375, 0.11777843475341797, 0.11820543670654297, 0.1184686050415039, 0.11819417572021484, 0.1180917739868164, 0.11786137390136718, 0.11804774475097657, 0.11794329833984375, 0.11819213104248047, 0.11829759979248047, 0.11792998504638671, 0.11812147521972656, 0.11761766052246093, 0.11775590515136719, 0.11777228546142578, 0.11778253173828125, 0.11822898864746094, 0.11784601593017578, 0.11788082885742188, 0.11892332458496094, 0.11907987213134766, 0.11818086242675781, 0.11776000213623047, 0.11779891204833984, 0.1183272933959961, 0.11851878356933594, 0.11775590515136719, 0.11797401428222656, 0.11782144165039063, 0.11866828918457031, 0.11815936279296875]",tokens/s,8.338749384093028,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,openai-community/gpt2,openai-community/gpt2,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1232.244736,1005.060096,0.0,358.612992,318.913024,s,24,0.17188252639770507,0.007161771933237711,0.0003243953015149955,0.0070352799892425535,0.007393788623809815,0.007410750222206116,0.00821779634475708,"[0.008458335876464844, 0.007218272209167481, 0.007377503871917724, 0.007035391807556152, 0.006997471809387207, 0.006964863777160645, 0.006896895885467529, 0.006932159900665283, 0.006920639991760254, 0.006907519817352295, 0.0068839678764343265, 0.006967360019683838, 0.007303135871887207, 0.007367551803588867, 0.007091263771057129, 0.007035168170928955, 0.0072576642036437985, 0.006943456172943115, 0.007412511825561524, 0.007331488132476807, 0.006977695941925049, 0.007400767803192139, 0.00699567985534668, 0.007205760002136231]",tokens/s,35745.34380408103,kWh,8.272282266742969e-08,4.5327846553754305e-08,1.771287987213526e-07,3.051794679425366e-07,tokens/kWh,838850666.2191416,MB,1232.539648,1005.060096,0.0,358.612992,328.804864,s,24,10.164368652343752,0.4235153605143229,0.013402359087444363,0.4229962463378906,0.43468231201171875,0.43530513000488286,0.46083279296875,"[0.4684530029296875, 0.4353216552734375, 0.431840576171875, 0.4128219909667969, 0.41338916015625, 0.4116455383300781, 0.41182235717773436, 0.41141156005859375, 0.41097488403320315, 0.4117151794433594, 0.41143212890625, 0.4306710510253906, 0.4334475708007812, 0.43521148681640626, 0.41144964599609374, 0.4190294189453125, 0.4116963806152344, 0.4119750061035156, 0.43146246337890626, 0.4310239562988281, 0.42696307373046877, 0.4310359191894531, 0.43171932983398437, 0.42785531616210937]",tokens/s,148.7549351775386,kWh,4.88881335726806e-06,2.6788370271125138e-06,8.551130498695033e-06,1.611878088307561e-05,tokens/kWh,3908484.1748887296,,s,1511,10.320183299064633,0.006830035274033512,0.0009334458342907997,0.006691840171813965,0.006923264026641846,0.007306751966476441,0.013799629211425782,"[0.007993343830108643, 0.008039423942565918, 
0.007952383995056152, 0.007895040035247802, 0.007833600044250488, 0.007610367774963379, 0.007690271854400635, 0.007604191780090332, 0.007493631839752197, 0.007529471874237061, 0.007613440036773681, 0.007756800174713135, 0.007661568164825439, 0.007654399871826172, 0.0076687679290771485, 0.007647200107574463, 0.007772160053253174, 0.007723008155822754, 0.00773632001876831, 0.007501823902130127, 0.0075970559120178225, 0.007692287921905518, 0.007699456214904785, 0.007568384170532226, 0.0075038719177246095, 0.0075335679054260255, 0.007614528179168701, 0.007336895942687988, 0.007300096035003662, 0.007535615921020508, 0.00753868818283081, 0.007528448104858398, 0.007377920150756836, 0.0075038719177246095, 0.007502848148345947, 0.007266304016113281, 0.007370751857757568, 0.007285759925842285, 0.007271423816680909, 0.007333888053894043, 0.007404543876647949, 0.007350272178649903, 0.007293951988220215, 0.0070860800743103025, 0.007158783912658692, 0.007108607769012451, 0.007090176105499267, 0.007136256217956543, 0.0072202239036560055, 0.007285759925842285, 0.007223296165466309, 0.007193600177764893, 0.007156735897064209, 0.007122943878173828, 0.007145472049713135, 0.007048223972320557, 0.006976480007171631, 0.0068577280044555666, 0.006855679988861084, 0.007054336071014404, 0.006946815967559815, 0.0066406397819519045, 0.01427455997467041, 0.0069928960800170895, 0.007123968124389648, 0.0074291200637817386, 0.007486495971679687, 0.007579616069793701, 0.007607327938079834, 0.007285791873931885, 0.007080895900726318, 0.006916096210479736, 0.00690176010131836, 0.006837247848510742, 0.006814720153808594, 0.006848512172698974, 0.006859776020050049, 0.006823935985565186, 0.006833151817321777, 0.006855679988861084, 0.006862847805023193, 0.006806528091430664, 0.006830080032348633, 0.0068351998329162595, 0.006816768169403077, 0.00682700777053833, 0.006862847805023193, 0.006846528053283691, 0.006804416179656983, 0.006821887969970703, 0.006904831886291504, 0.0068351998329162595, 0.006896639823913574, 0.006830080032348633, 0.006900735855102539, 0.006841343879699707, 0.00683622407913208, 0.006845439910888672, 0.006834176063537598, 0.006814720153808594, 0.006823935985565186, 0.00682700777053833, 0.006820864200592041, 0.006845439910888672, 0.00684441614151001, 0.006830080032348633, 0.006867968082427979, 0.006841343879699707, 0.006922239780426025, 0.006859776020050049, 0.006872064113616944, 0.006855679988861084, 0.006818816184997558, 0.006988800048828125, 0.006820864200592041, 0.006837247848510742, 0.006816768169403077, 0.006854656219482422, 0.00682700777053833, 0.0068055038452148435, 0.006838272094726563, 0.006840320110321045, 0.006822912216186523, 0.0068351998329162595, 0.006842368125915528, 0.014609408378601075, 0.006852608203887939, 0.006833151817321777, 0.006881279945373535, 0.006899712085723877, 0.006879231929779053, 0.006865920066833496, 0.006904831886291504, 0.006851583957672119, 0.006899712085723877, 0.006859776020050049, 0.006854656219482422, 0.006852608203887939, 0.006855679988861084, 0.0068618240356445315, 0.0068249602317810056, 0.006856768131256103, 0.006852543830871582, 0.006809599876403808, 0.0068280320167541505, 0.006817791938781738, 0.006834176063537598, 0.006813695907592773, 0.006920191764831543, 0.0068915200233459475, 0.0068280320167541505, 0.006845439910888672, 0.00683622407913208, 0.0068618240356445315, 0.0068351998329162595, 0.006834176063537598, 0.0068076162338256835, 0.006836160182952881, 0.006854656219482422, 0.006830080032348633, 0.006843391895294189, 0.006821887969970703, 
0.0068689918518066405, 0.00683622407913208, 0.006937600135803222, 0.00690176010131836, 0.00684441614151001, 0.006833151817321777, 0.006840320110321045, 0.0069324798583984375, 0.006849535942077637, 0.0068280320167541505, 0.006894591808319092, 0.006841343879699707, 0.006843391895294189, 0.006812672138214112, 0.006821887969970703, 0.00682700777053833, 0.006804480075836182, 0.006867968082427979, 0.006823935985565186, 0.006815743923187256, 0.006767615795135498, 0.006802432060241699, 0.0069324798583984375, 0.006825984001159668, 0.006831103801727295, 0.006831103801727295, 0.013900799751281738, 0.00653004789352417, 0.006533120155334473, 0.006556672096252441, 0.006542335987091064, 0.00651478385925293, 0.0065207362174987795, 0.006524928092956543, 0.006504447937011719, 0.0065157442092895505, 0.0065392317771911625, 0.0065146880149841305, 0.006575104236602783, 0.0065136637687683106, 0.006525951862335205, 0.006525951862335205, 0.006590464115142822, 0.006534143924713135, 0.006563839912414551, 0.006557695865631104, 0.006511616230010986, 0.006556672096252441, 0.006681600093841553, 0.0066375679969787596, 0.006707200050354004, 0.006534143924713135, 0.006488096237182617, 0.0064992961883544925, 0.006550528049468994, 0.006586368083953857, 0.0065075201988220215, 0.006549503803253174, 0.006560768127441406, 0.006505472183227539, 0.00653107213973999, 0.00653107213973999, 0.0065136637687683106, 0.006508543968200684, 0.006520832061767578, 0.006533120155334473, 0.0065146880149841305, 0.006519807815551758, 0.006543360233306885, 0.006706175804138184, 0.006556672096252441, 0.006560768127441406, 0.006526976108551025, 0.00652185583114624, 0.0065146880149841305, 0.0065443840026855465, 0.006526976108551025, 0.006536191940307618, 0.006510591983795166, 0.006647808074951172, 0.0066109437942504885, 0.006515711784362793, 0.00653107213973999, 0.006625279903411865, 0.006593535900115967, 0.006519807815551758, 0.006575104236602783, 0.0065474557876586915, 0.006552576065063476, 0.013965312004089356, 0.006725632190704346, 0.006696959972381592, 0.006689792156219483, 0.006518784046173095, 0.006554624080657959, 0.006492159843444824, 0.0065484800338745115, 0.006560768127441406, 0.006615039825439453, 0.0065443840026855465, 0.006516736030578613, 0.0066344962120056155, 0.006715392112731934, 0.00669593620300293, 0.00672051191329956, 0.006722591876983643, 0.006665184020996094, 0.00653004789352417, 0.0065413122177124024, 0.006516736030578613, 0.0065484800338745115, 0.006526976108551025, 0.006519807815551758, 0.0065710082054138185, 0.006595583915710449, 0.006533120155334473, 0.006511616230010986, 0.006498303890228272, 0.006542335987091064, 0.006518784046173095, 0.006569983959197998, 0.006553599834442139, 0.006550528049468994, 0.00653004789352417, 0.0065075201988220215, 0.0065136637687683106, 0.006527999877929688, 0.0065136637687683106, 0.006504447937011719, 0.006568960189819336, 0.006516736030578613, 0.006509568214416504, 0.006617087841033936, 0.006519807815551758, 0.0065382399559021, 0.006511616230010986, 0.006516736030578613, 0.006520832061767578, 0.0065443840026855465, 0.0065075201988220215, 0.006515711784362793, 0.0065064959526062015, 0.00652185583114624, 0.006533120155334473, 0.0065146880149841305, 0.006527999877929688, 0.006569983959197998, 0.006503424167633057, 0.006500351905822754, 0.006516736030578613, 0.006496255874633789, 0.006520832061767578, 0.013800448417663574, 0.0065484800338745115, 0.006523903846740723, 0.006492159843444824, 0.006493184089660644, 0.006494207859039307, 0.006560768127441406, 0.006505472183227539, 
0.006500351905822754, 0.006540287971496582, 0.006510591983795166, 0.006511616230010986, 0.00652185583114624, 0.006496255874633789, 0.0065710082054138185, 0.006529024124145508, 0.006532095909118653, 0.0066119680404663084, 0.006691840171813965, 0.006487040042877197, 0.006536191940307618, 0.00648089599609375, 0.00653107213973999, 0.0066078720092773435, 0.006512639999389648, 0.006558720111846924, 0.006624256134033203, 0.006502463817596436, 0.006521791934967041, 0.006515711784362793, 0.006665215969085693, 0.006515711784362793, 0.0065064959526062015, 0.006520832061767578, 0.006500351905822754, 0.006518784046173095, 0.006551551818847656, 0.00653004789352417, 0.006560768127441406, 0.0065259838104248045, 0.006515679836273193, 0.006488096237182617, 0.006514656066894531, 0.006523903846740723, 0.006515711784362793, 0.0065075201988220215, 0.006520832061767578, 0.006516736030578613, 0.006495232105255127, 0.006559743881225586, 0.006502399921417237, 0.006503424167633057, 0.006516736030578613, 0.00652185583114624, 0.006478879928588867, 0.0065075201988220215, 0.0064992961883544925, 0.006602752208709717, 0.006545407772064209, 0.006527999877929688, 0.006510591983795166, 0.006556672096252441, 0.006497280120849609, 0.013763584136962891, 0.006534143924713135, 0.006526976108551025, 0.006529024124145508, 0.0065064959526062015, 0.00652288007736206, 0.006519807815551758, 0.006511616230010986, 0.006551551818847656, 0.006550528049468994, 0.006493184089660644, 0.00652185583114624, 0.006499328136444092, 0.0065669121742248536, 0.006535168170928955, 0.006510591983795166, 0.006533120155334473, 0.0065484800338745115, 0.006524928092956543, 0.00657203197479248, 0.006549503803253174, 0.0065146880149841305, 0.006523903846740723, 0.006582272052764892, 0.006560768127441406, 0.006545407772064209, 0.006491136074066162, 0.006533120155334473, 0.006532095909118653, 0.006486015796661377, 0.006552576065063476, 0.006518784046173095, 0.006516736030578613, 0.006542335987091064, 0.0065064959526062015, 0.006583295822143555, 0.006563839912414551, 0.006527999877929688, 0.0065484800338745115, 0.006523903846740723, 0.006543360233306885, 0.0065413122177124024, 0.00653926420211792, 0.0065177597999572755, 0.00653107213973999, 0.006545407772064209, 0.00653107213973999, 0.006542335987091064, 0.0065064959526062015, 0.006540287971496582, 0.0065484800338745115, 0.006519807815551758, 0.006502399921417237, 0.006603775978088379, 0.006535168170928955, 0.0065669121742248536, 0.006512639999389648, 0.00652185583114624, 0.006523903846740723, 0.006495232105255127, 0.00653107213973999, 0.006523903846740723, 0.006518784046173095, 0.013785087585449218, 0.006524928092956543, 0.006550528049468994, 0.006527999877929688, 0.006502399921417237, 0.006593535900115967, 0.006502399921417237, 0.006545407772064209, 0.0065474557876586915, 0.006515711784362793, 0.006556672096252441, 0.0065495681762695315, 0.006510528087615966, 0.006535168170928955, 0.006501376152038574, 0.0065413122177124024, 0.006576128005981445, 0.006512639999389648, 0.0065177597999572755, 0.006511616230010986, 0.006504479885101318, 0.006548448085784912, 0.006603775978088379, 0.0065146880149841305, 0.006516736030578613, 0.006497280120849609, 0.00653004789352417, 0.006505472183227539, 0.006496255874633789, 0.006533152103424073, 0.006500319957733154, 0.006509568214416504, 0.00652185583114624, 0.006504447937011719, 0.006512639999389648, 0.006512639999389648, 0.006487040042877197, 0.00652185583114624, 0.006524928092956543, 0.006500351905822754, 0.006516736030578613, 0.006619135856628418, 0.006508543968200684, 
0.006534143924713135, 0.0065413122177124024, 0.0065382399559021, 0.0065382399559021, 0.006491136074066162, 0.006525951862335205, 0.00652288007736206, 0.006512639999389648, 0.006525951862335205, 0.006505536079406738, 0.006543295860290527, 0.00653926420211792, 0.006500351905822754, 0.006529024124145508, 0.006589439868927002, 0.0065146880149841305, 0.0065064959526062015, 0.0065147199630737306, 0.006502367973327637, 0.006497280120849609, 0.013766655921936035, 0.006527999877929688, 0.00652185583114624, 0.006518784046173095, 0.006512639999389648, 0.006503424167633057, 0.00652185583114624, 0.00652185583114624, 0.006498303890228272, 0.006508543968200684, 0.006535168170928955, 0.0065064959526062015, 0.006542399883270263, 0.006519743919372559, 0.006504447937011719, 0.006533120155334473, 0.00653107213973999, 0.006560768127441406, 0.006490111827850342, 0.0065146880149841305, 0.006509568214416504, 0.006500351905822754, 0.0065177597999572755, 0.0065413122177124024, 0.0065064959526062015, 0.006504447937011719, 0.006474751949310303, 0.00652185583114624, 0.006520864009857178, 0.006483935832977295, 0.006499328136444092, 0.006504447937011719, 0.006525951862335205, 0.0065177597999572755, 0.00652185583114624, 0.0065177597999572755, 0.006525951862335205, 0.006589439868927002, 0.0064880638122558594, 0.006551551818847656, 0.006508543968200684, 0.0065443840026855465, 0.006529024124145508, 0.006518784046173095, 0.006505472183227539, 0.006511616230010986, 0.006496255874633789, 0.006481919765472412, 0.006491136074066162, 0.006533120155334473, 0.00653107213973999, 0.006512639999389648, 0.00652288007736206, 0.0065075201988220215, 0.006501376152038574, 0.006582272052764892, 0.006552576065063476, 0.006540287971496582, 0.00652185583114624, 0.006492159843444824, 0.0065372161865234375, 0.006520895957946777, 0.006529983997344971, 0.013715456008911133, 0.006525951862335205, 0.006534143924713135, 0.006516736030578613, 0.006515711784362793, 0.00652288007736206, 0.006479872226715088, 0.006540287971496582, 0.006534143924713135, 0.006509568214416504, 0.006516736030578613, 0.006504447937011719, 0.006498303890228272, 0.00652185583114624, 0.0065064959526062015, 0.00653004789352417, 0.0065781760215759275, 0.0065075201988220215, 0.0065443840026855465, 0.006538271903991699, 0.006487008094787598, 0.006527999877929688, 0.006557695865631104, 0.006527999877929688, 0.006519807815551758, 0.006497280120849609, 0.006681600093841553, 0.0065669121742248536, 0.006529024124145508, 0.006519807815551758, 0.006519807815551758, 0.0065075201988220215, 0.00653926420211792, 0.006518784046173095, 0.00658739185333252, 0.0065075201988220215, 0.006497280120849609, 0.006516736030578613, 0.006512639999389648, 0.006494207859039307, 0.006525951862335205, 0.00653107213973999, 0.006519807815551758, 0.006552576065063476, 0.0065474557876586915, 0.006553599834442139, 0.0065474557876586915, 0.006501376152038574, 0.006543360233306885, 0.0065443840026855465, 0.0065064959526062015, 0.006554624080657959, 0.006540287971496582, 0.0065413122177124024, 0.0065372161865234375, 0.006603775978088379, 0.006525951862335205, 0.006563839912414551, 0.006503424167633057, 0.006545407772064209, 0.006515711784362793, 0.006534143924713135, 0.006552576065063476, 0.013682687759399414, 0.006536191940307618, 0.0065270400047302245, 0.006512576103210449, 0.006529024124145508, 0.006494207859039307, 0.00653004789352417, 0.006556672096252441, 0.006519807815551758, 0.006551551818847656, 0.0065474557876586915, 0.0065372161865234375, 0.006563839912414551, 0.006561791896820069, 0.006527999877929688, 
0.006533120155334473, 0.00653004789352417, 0.006553599834442139, 0.006523903846740723, 0.006510591983795166, 0.006527999877929688, 0.00653107213973999, 0.006479936122894287, 0.0065289602279663085, 0.006498303890228272, 0.00653926420211792, 0.00672051191329956, 0.006555647850036621, 0.006533120155334473, 0.0065382399559021, 0.006508543968200684, 0.00653004789352417, 0.0065382399559021, 0.006503424167633057, 0.006499328136444092, 0.006526976108551025, 0.0065064959526062015, 0.0065146880149841305, 0.006526976108551025, 0.006509568214416504, 0.006527999877929688, 0.006505472183227539, 0.006520832061767578, 0.006573056221008301, 0.006516736030578613, 0.0065075201988220215, 0.006510591983795166, 0.006519807815551758, 0.006508543968200684, 0.006491136074066162, 0.00652288007736206, 0.006520832061767578, 0.0065064959526062015, 0.006519807815551758, 0.006508543968200684, 0.006501376152038574, 0.006516736030578613, 0.0065146880149841305, 0.006532095909118653, 0.006524928092956543, 0.006520832061767578, 0.006516736030578613, 0.006501376152038574, 0.013792256355285644, 0.0065710082054138185, 0.00652288007736206, 0.006676544189453125, 0.007540671825408935, 0.007066624164581299, 0.0068392958641052244, 0.006879231929779053, 0.006876160144805908, 0.006834176063537598, 0.006880256175994873, 0.006866943836212158, 0.006873087882995605, 0.006825984001159668, 0.006841343879699707, 0.0068249602317810056, 0.006807551860809326, 0.006838272094726563, 0.006795263767242431, 0.006808576107025147, 0.0068055038452148435, 0.006918144226074219, 0.0068280320167541505, 0.006778880119323731, 0.006833151817321777, 0.006814720153808594, 0.006817791938781738, 0.006815743923187256, 0.006845439910888672, 0.006854656219482422, 0.00682092809677124, 0.0068156800270080566, 0.0067983360290527345, 0.0068249602317810056, 0.006822912216186523, 0.006814720153808594, 0.006783999919891357, 0.006845439910888672, 0.006808576107025147, 0.006819839954376221, 0.006809599876403808, 0.006944767951965332, 0.006852608203887939, 0.006819839954376221, 0.006840320110321045, 0.00683622407913208, 0.00681990385055542, 0.006798272132873535, 0.006862847805023193, 0.006880320072174072, 0.006816703796386719, 0.0068055038452148435, 0.006820864200592041, 0.006815743923187256, 0.006790143966674805, 0.0068577280044555666, 0.006776832103729248, 0.006856704235076904, 0.006820864200592041, 0.006874112129211426, 0.006807551860809326, 0.006838272094726563, 0.006872064113616944, 0.01447321605682373, 0.0068392958641052244, 0.006914048194885254, 0.006894591808319092, 0.006840320110321045, 0.006819839954376221, 0.006793216228485107, 0.006817791938781738, 0.0068392958641052244, 0.006800384044647217, 0.006812672138214112, 0.006815743923187256, 0.00682700777053833, 0.0067983360290527345, 0.0068055038452148435, 0.0068392958641052244, 0.006866943836212158, 0.006837247848510742, 0.006831103801727295, 0.0068392958641052244, 0.00678604793548584, 0.006795263767242431, 0.006829055786132812, 0.006802432060241699, 0.006811647891998291, 0.006818816184997558, 0.006819839954376221, 0.007336959838867187, 0.007061503887176514, 0.006850560188293457, 0.006797311782836914, 0.0069816322326660156, 0.006795263767242431, 0.006820864200592041, 0.006807551860809326, 0.006803455829620361, 0.006829055786132812, 0.006796288013458252, 0.006837247848510742, 0.006790143966674805, 0.006817791938781738, 0.006819839954376221, 0.006802432060241699, 0.00679423999786377, 0.006817791938781738, 0.006881279945373535, 0.0067696962356567384, 0.006842336177825928, 0.006814720153808594, 0.00678604793548584, 
0.006843391895294189, 0.0073134078979492185, 0.00707583999633789, 0.0068884482383728025, 0.007676928043365478, 0.007090176105499267, 0.0069212160110473635, 0.006817791938781738, 0.006964223861694336, 0.00703385591506958, 0.0068392958641052244, 0.00682700777053833, 0.006873087882995605, 0.01455513572692871, 0.006838272094726563, 0.006873087882995605, 0.0068280320167541505, 0.006850560188293457, 0.006773759841918945, 0.006806528091430664, 0.006809599876403808, 0.006825984001159668, 0.006817791938781738, 0.006823935985565186, 0.0071198720932006835, 0.006949888229370117, 0.007005184173583984, 0.008019968032836914, 0.007243775844573975, 0.006923264026641846, 0.00713318395614624, 0.007066624164581299, 0.006996992111206054, 0.006957056045532227, 0.006958079814910889, 0.006940671920776367, 0.006837247848510742, 0.006847487926483154, 0.006825984001159668, 0.007000063896179199, 0.007004159927368164, 0.007004159927368164, 0.0069027838706970214, 0.006880256175994873, 0.006895679950714112, 0.006774720191955567, 0.0068392958641052244, 0.006806528091430664, 0.006803455829620361, 0.006842368125915528, 0.006960127830505371, 0.006965248107910156, 0.0068618240356445315, 0.006854656219482422, 0.006937600135803222, 0.00684441614151001, 0.006890495777130127, 0.006914048194885254, 0.006811647891998291, 0.006814720153808594, 0.006846464157104492, 0.0068280320167541505, 0.006800384044647217, 0.006813695907592773, 0.006837247848510742, 0.006803455829620361, 0.00679423999786377, 0.0068392958641052244, 0.006813695907592773, 0.006801407814025879, 0.006912000179290771, 0.006808576107025147, 0.006809599876403808, 0.006806528091430664, 0.006800384044647217, 0.007064576148986816, 0.013950976371765136, 0.006559743881225586, 0.006540351867675781, 0.006517695903778076, 0.006505472183227539, 0.006495232105255127, 0.006523903846740723, 0.006510591983795166, 0.00659660816192627, 0.006633471965789795, 0.006639616012573242, 0.006500351905822754, 0.006551551818847656, 0.0065372161865234375, 0.00653926420211792, 0.006533120155334473, 0.006512639999389648, 0.006516736030578613, 0.006492159843444824, 0.006520832061767578, 0.006509568214416504, 0.00653107213973999, 0.006512639999389648, 0.006523903846740723, 0.006508543968200684, 0.006549503803253174, 0.006542335987091064, 0.0064839677810668945, 0.006533120155334473, 0.006533120155334473, 0.006512639999389648, 0.0065075201988220215, 0.006558720111846924, 0.0064839677810668945, 0.006508543968200684, 0.006649856090545654, 0.006551551818847656, 0.0065146880149841305, 0.006499360084533692, 0.006588384151458741, 0.00653107213973999, 0.006508543968200684, 0.006524991989135742, 0.006502336025238037, 0.006525951862335205, 0.006500351905822754, 0.006506559848785401, 0.0065586562156677245, 0.0065146880149841305, 0.006532095909118653, 0.0065382399559021, 0.00653004789352417, 0.006519807815551758, 0.006494207859039307, 0.006496255874633789, 0.006491136074066162, 0.0064880638122558594, 0.0065064959526062015, 0.006510623931884766, 0.0065361599922180175, 0.006493184089660644, 0.0065075201988220215, 0.0064839677810668945, 0.013864959716796875, 0.006532095909118653, 0.00653926420211792, 0.006496255874633789, 0.006491136074066162, 0.006524928092956543, 0.00652288007736206, 0.006497280120849609, 0.006524928092956543, 0.006515711784362793, 0.0064849920272827145, 0.0065474557876586915, 0.006524928092956543, 0.006529024124145508, 0.006545407772064209, 0.006533120155334473, 0.0065064959526062015, 0.006496255874633789, 0.006502399921417237, 0.006509568214416504, 0.006509568214416504, 0.006526976108551025, 
0.006535168170928955, 0.006474751949310303, 0.006523903846740723, 0.0065075201988220215, 0.006489088058471679, 0.00653107213973999, 0.0066447358131408694, 0.0065136637687683106, 0.006546432018280029, 0.006527999877929688, 0.006535200119018555, 0.0065586881637573246, 0.006535168170928955, 0.006546432018280029, 0.006505472183227539, 0.006532095909118653, 0.00653107213973999, 0.006519807815551758, 0.006527999877929688, 0.006552576065063476, 0.006491136074066162, 0.006564864158630371, 0.0065075201988220215, 0.006528031826019287, 0.006522848129272461, 0.006523903846740723, 0.0065064959526062015, 0.00743936014175415, 0.008608799934387207, 0.0074587841033935546, 0.007060480117797851, 0.006941696166992187, 0.0068280320167541505, 0.006870016098022461, 0.006840320110321045, 0.00690176010131836, 0.006821887969970703, 0.006807551860809326, 0.0068351998329162595, 0.006849535942077637, 0.0068280320167541505, 0.014097408294677734, 0.006561791896820069, 0.006553599834442139, 0.00653004789352417, 0.006556672096252441, 0.006526976108551025, 0.0065177597999572755, 0.006543360233306885, 0.00652185583114624, 0.006516736030578613, 0.006516736030578613, 0.006520832061767578, 0.006527999877929688, 0.006533120155334473, 0.006500351905822754, 0.0065372161865234375, 0.006552576065063476, 0.006498303890228272, 0.00653004789352417, 0.006487040042877197, 0.00653107213973999, 0.006527999877929688, 0.006502399921417237, 0.00653004789352417, 0.006520832061767578, 0.0064839677810668945, 0.006499328136444092, 0.006510591983795166, 0.006546432018280029, 0.006532095909118653, 0.006508543968200684, 0.006543360233306885, 0.0065413122177124024, 0.00653004789352417, 0.0066109437942504885, 0.006540287971496582, 0.00652288007736206, 0.006557695865631104, 0.006511616230010986, 0.0066406397819519045, 0.0065372161865234375, 0.006504447937011719, 0.006589439868927002, 0.006603775978088379, 0.0064839677810668945, 0.006511616230010986, 0.00653004789352417, 0.006542335987091064, 0.006554624080657959, 0.006504447937011719, 0.006520832061767578, 0.006527999877929688, 0.006511616230010986, 0.006516736030578613, 0.006532095909118653, 0.006487040042877197, 0.006592544078826904, 0.006533120155334473, 0.0065361599922180175, 0.0065064959526062015, 0.006512639999389648, 0.006505472183227539, 0.006549503803253174, 0.013765631675720215, 0.006526976108551025, 0.0065382399559021, 0.0065484800338745115, 0.006520832061767578, 0.006501376152038574, 0.006524928092956543, 0.006593535900115967, 0.006523903846740723, 0.006598656177520752, 0.006499328136444092, 0.006542335987091064, 0.006536191940307618, 0.00653004789352417, 0.0065136637687683106, 0.00653926420211792, 0.006533120155334473, 0.006525951862335205, 0.006525951862335205, 0.006510591983795166, 0.006510591983795166, 0.006495232105255127, 0.006550528049468994, 0.006529024124145508, 0.00648089599609375, 0.006518784046173095, 0.006536191940307618, 0.0065146880149841305, 0.0065064959526062015, 0.006486015796661377, 0.0066979842185974124, 0.006559743881225586, 0.006510591983795166, 0.006525951862335205, 0.006543360233306885, 0.006523903846740723, 0.006526976108551025, 0.0065413122177124024, 0.00653004789352417, 0.006499328136444092, 0.006524928092956543, 0.00653004789352417, 0.006540287971496582, 0.006515711784362793, 0.006534143924713135, 0.006490111827850342, 0.006504447937011719, 0.006503424167633057, 0.006680575847625733, 0.006489088058471679, 0.006542335987091064, 0.00648089599609375, 0.0065484800338745115, 0.006479872226715088, 0.006515711784362793, 0.006545407772064209, 0.006503424167633057, 
0.006809599876403808, 0.0065669121742248536, 0.006527999877929688, 0.006543360233306885, 0.006518784046173095, 0.0065413122177124024, 0.014844927787780762, 0.006867968082427979, 0.007014400005340577, 0.006895616054534912, 0.006855679988861084, 0.006817791938781738, 0.0067983360290527345, 0.0068280320167541505, 0.006812672138214112, 0.0068280320167541505, 0.006944767951965332, 0.006852608203887939, 0.006920191764831543, 0.006803455829620361, 0.006838272094726563, 0.006853631973266602, 0.006814720153808594, 0.006803455829620361, 0.006814720153808594, 0.006807551860809326, 0.006799359798431396, 0.00679423999786377, 0.006817791938781738, 0.006821887969970703, 0.00683622407913208, 0.0068249602317810056, 0.006795263767242431, 0.006770688056945801, 0.006814720153808594, 0.006854656219482422, 0.006831103801727295, 0.006865920066833496, 0.006854656219482422, 0.0068351998329162595, 0.0068351998329162595, 0.006831103801727295, 0.006831103801727295, 0.006838272094726563, 0.0068392958641052244, 0.006834176063537598, 0.0068249602317810056, 0.006810624122619629, 0.006847487926483154, 0.006812672138214112, 0.007004159927368164, 0.006865920066833496, 0.006843391895294189, 0.006852608203887939, 0.006821887969970703, 0.006858751773834228, 0.006813695907592773, 0.006807551860809326, 0.006823935985565186, 0.006848512172698974, 0.006816768169403077, 0.006859776020050049, 0.006837247848510742, 0.006851583957672119, 0.006810624122619629, 0.006855679988861084, 0.006823935985565186, 0.006811647891998291, 0.006850592136383057, 0.014511072158813476, 0.006811647891998291, 0.006855679988861084, 0.006822912216186523, 0.006848512172698974, 0.006813695907592773, 0.006862847805023193, 0.006793216228485107, 0.006822912216186523, 0.006864895820617676, 0.006845439910888672, 0.006826015949249267, 0.006809567928314209, 0.006840320110321045, 0.006837247848510742, 0.006806528091430664, 0.006859776020050049, 0.006813695907592773, 0.006849599838256836, 0.006807487964630127, 0.006851583957672119, 0.006814720153808594, 0.006834176063537598, 0.006840320110321045, 0.006829055786132812, 0.006846464157104492, 0.006884352207183838, 0.006825984001159668, 0.006845439910888672, 0.006814720153808594, 0.006846464157104492, 0.0068618240356445315, 0.00677785587310791, 0.006863872051239014, 0.006830080032348633, 0.0068280320167541505, 0.006810624122619629, 0.006864895820617676, 0.0067983360290527345, 0.006937600135803222, 0.006856704235076904, 0.00683622407913208, 0.006834176063537598, 0.006809599876403808, 0.006858751773834228, 0.006840320110321045, 0.006808640003204346, 0.0068607358932495114, 0.006880256175994873, 0.006843391895294189, 0.00681171178817749, 0.006853568077087402, 0.006813695907592773, 0.006850560188293457, 0.006812672138214112, 0.006823935985565186, 0.0068249602317810056, 0.0069027838706970214, 0.006849535942077637, 0.00682700777053833, 0.006840320110321045, 0.006843391895294189, 0.006808576107025147, 0.013846528053283692, 0.006520832061767578, 0.006509568214416504, 0.006534143924713135, 0.006516736030578613, 0.006527008056640625, 0.006534111976623535, 0.006515711784362793, 0.006516736030578613, 0.006543360233306885, 0.006533120155334473, 0.006516736030578613, 0.006518784046173095, 0.006505472183227539, 0.006532095909118653, 0.0065443840026855465, 0.006529024124145508, 0.006532095909118653, 0.006526976108551025, 0.006515711784362793, 0.006546432018280029, 0.006532095909118653, 0.006558720111846924, 0.006774784088134766, 0.006849535942077637, 0.006837247848510742, 0.006949888229370117, 0.007171072006225586, 0.0071311678886413574, 
0.00744547176361084, 0.007161856174468994, 0.0074414081573486324, 0.007116799831390381, 0.0068986878395080565, 0.0068618240356445315, 0.006876160144805908, 0.00684441614151001, 0.00683622407913208, 0.0068628802299499515, 0.0068576960563659665, 0.006879231929779053, 0.006804480075836182, 0.006847487926483154, 0.006843391895294189, 0.006845439910888672, 0.006879231929779053, 0.006862847805023193, 0.006822912216186523, 0.006791168212890625, 0.007130112171173096, 0.006842368125915528, 0.006853631973266602, 0.006847487926483154, 0.006822912216186523, 0.006852608203887939, 0.006838272094726563, 0.006830080032348633, 0.0068321280479431154, 0.006811647891998291, 0.006845439910888672, 0.006819839954376221, 0.00684441614151001, 0.0068321280479431154, 0.014599167823791503, 0.006838272094726563, 0.006821887969970703, 0.0068321280479431154, 0.006842368125915528, 0.006843391895294189, 0.00684441614151001, 0.0068249602317810056, 0.006851615905761718, 0.006811615943908692, 0.006864927768707276, 0.0068269758224487305, 0.006820864200592041, 0.006862847805023193, 0.006830080032348633, 0.0068884482383728025, 0.006837247848510742, 0.006840320110321045, 0.006854656219482422, 0.006791232109069824, 0.006830016136169434, 0.006804480075836182, 0.006800384044647217, 0.006840320110321045, 0.0068280320167541505, 0.006809599876403808, 0.00676966381072998, 0.006829055786132812, 0.006815743923187256, 0.00679423999786377, 0.006838272094726563, 0.006829055786132812, 0.006801407814025879, 0.006876160144805908, 0.006797311782836914, 0.006789120197296142, 0.00678604793548584, 0.006810624122619629, 0.006821887969970703, 0.00682700777053833, 0.006837247848510742, 0.006804480075836182, 0.006830080032348633, 0.006811647891998291, 0.006975488185882568, 0.006833151817321777, 0.006806528091430664, 0.006866943836212158, 0.00692633581161499, 0.006884352207183838, 0.006833151817321777, 0.006858751773834228, 0.006841343879699707, 0.006834176063537598, 0.006867968082427979, 0.0068618240356445315, 0.006867968082427979, 0.006899712085723877, 0.006859776020050049, 0.0068249602317810056, 0.0068321280479431154, 0.0068577280044555666, 0.006837247848510742, 0.013855744361877441, 0.006562880039215088, 0.00689247989654541, 0.006939712047576904, 0.006982592105865479, 0.006859776020050049, 0.006833151817321777, 0.006833151817321777, 0.006905888080596924, 0.006889440059661865, 0.006778880119323731, 0.0068351998329162595, 0.0068249602317810056, 0.0069621758460998535, 0.0068055357933044435, 0.0068873920440673825, 0.0068618240356445315, 0.006809599876403808, 0.006841343879699707, 0.006985727787017822, 0.006808576107025147, 0.006840320110321045, 0.006838272094726563, 0.006924287796020508, 0.006774784088134766, 0.006858751773834228, 0.006848512172698974, 0.006866943836212158, 0.006783999919891357, 0.006855679988861084, 0.006812672138214112, 0.006789120197296142, 0.006813695907592773, 0.006794271945953369, 0.006850527763366699, 0.0068055038452148435, 0.007599103927612305, 0.0069632000923156735, 0.006883327960968018, 0.006840320110321045, 0.006845439910888672, 0.006845439910888672, 0.00679423999786377, 0.006895616054534912, 0.006833151817321777, 0.006796288013458252, 0.006781951904296875, 0.006810624122619629, 0.006819839954376221, 0.0067983360290527345, 0.006820864200592041, 0.006903903961181641, 0.006820767879486084, 0.006845439910888672, 0.006820864200592041, 0.006811647891998291, 0.006808576107025147, 0.00684441614151001, 0.006811647891998291, 0.006816768169403077, 0.006833151817321777, 0.0068392958641052244, 0.006854656219482422, 0.014112768173217773, 
0.006528031826019287, 0.006518752098083496, 0.006535168170928955, 0.0066826238632202144, 0.006511616230010986, 0.006540287971496582, 0.006558720111846924, 0.0065146880149841305, 0.006554624080657959, 0.006533120155334473, 0.006774784088134766, 0.006858751773834228, 0.006843391895294189, 0.006834176063537598, 0.006846464157104492, 0.006812672138214112, 0.006800384044647217, 0.006847487926483154, 0.006813695907592773, 0.006833151817321777, 0.006821887969970703, 0.006822912216186523, 0.006803455829620361, 0.006812672138214112, 0.006829055786132812, 0.006812672138214112, 0.006818816184997558, 0.0068321280479431154, 0.00682700777053833, 0.0068249602317810056, 0.006820864200592041, 0.0068351998329162595, 0.00683622407913208, 0.006767615795135498, 0.006837247848510742, 0.006819839954376221, 0.006820864200592041, 0.006815743923187256, 0.006843391895294189, 0.006804480075836182, 0.006821887969970703, 0.00683523178100586, 0.006829023838043213, 0.006842368125915528, 0.0068321280479431154, 0.006810624122619629, 0.0068392958641052244, 0.006783999919891357, 0.007003136157989502, 0.006883327960968018, 0.006802432060241699, 0.006848512172698974, 0.00683622407913208, 0.006863872051239014, 0.006843391895294189, 0.006842368125915528, 0.006903808116912841, 0.006806528091430664, 0.006872064113616944, 0.006909952163696289, 0.006910975933074951, 0.006916096210479736]",tokens/s,146.412128177699,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,togethercomputer/RedPajama-INCITE-Base-7B-v0.1,togethercomputer/RedPajama-INCITE-Base-7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.1,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1373.622272,6237.454336,0.0,5591.007232,5346.100224,s,10,5.623939697265624,0.5623939697265625,0.0018664729250946,0.5620524597167968,0.563123095703125,0.5654235778808594,0.5672639636230469,"[0.5677240600585938, 0.5623804931640625, 0.5615673828125, 0.5610968627929688, 0.561818603515625, 0.5608659057617188, 0.5612388305664062, 0.5622863159179687, 0.562349365234375, 0.5626118774414063]",tokens/s,455.19691493930475,kWh,6.624708389058526e-06,3.630042331800116e-06,3.1119932303329425e-05,4.1374683024188065e-05,tokens/kWh,6187358.57989147,MB,1373.622272,6237.454336,0.0,5591.007232,5555.342336,s,10,326.497560546875,32.649756054687494,0.006483931910806726,32.650626953125,32.658396484375,32.6586953125,32.658934375,"[32.65203515625, 32.6462578125, 32.658994140625, 32.65576953125, 32.658330078125, 32.651564453125, 32.649689453125, 32.63969140625, 32.6434453125, 32.641783203125]",tokens/s,1.9295703126993242,kWh,0.0003854154377200722,0.00021124086242082436,0.0017644989764128718,0.0023611552765537684,tokens/kWh,26681.853847389422,,s,629,331.03564239501964,0.5262887796423203,0.06675128508752028,0.51820849609375,0.5186688842773437,0.5188689819335938,1.07959478515625,"[0.5180374755859375, 0.5182678833007812, 0.5178931274414063, 0.5179525146484375, 0.517771240234375, 0.5180712890625, 0.5179750366210938, 0.5180805053710937, 0.5178931274414063, 0.51806103515625, 0.5181265869140625, 0.5183170776367187, 
0.5182382202148438, 0.5182545776367188, 0.5179371337890625, 0.5179320068359375, 0.5178828735351563, 0.5182423095703125, 0.5177661743164063, 0.5184501953125, 0.5183272705078125, 0.5182412719726562, 0.518329345703125, 0.5184378662109375, 0.51814501953125, 0.518319091796875, 0.5182443237304688, 0.518181884765625, 0.5181737060546875, 0.5180671997070313, 0.5180620727539063, 0.5182228393554688, 0.518096923828125, 0.5180733642578125, 0.5180712890625, 0.5181204223632813, 0.518097900390625, 0.51814501953125, 0.5181767578125, 0.5185771484375, 0.518513671875, 0.5182904052734375, 0.5183529052734375, 0.5182494506835937, 0.5183231811523438, 0.518898681640625, 0.5186907958984375, 0.5187061767578125, 0.5188659057617188, 0.5184491577148438, 0.5182515258789062, 0.5187911376953125, 0.518445068359375, 0.518139892578125, 0.5181849365234374, 0.518667236328125, 0.518803466796875, 0.5185535888671875, 0.5189119873046875, 0.518846435546875, 0.518677490234375, 0.518724609375, 1.0810408935546876, 0.518350830078125, 0.51835595703125, 0.5182074584960937, 0.518297607421875, 0.5182279663085938, 0.5184102172851562, 0.5181460571289063, 0.5180006103515625, 0.5179330444335938, 0.5180252075195313, 0.5179064331054688, 0.5179320068359375, 0.5177804565429688, 0.5182894287109375, 0.5181163330078125, 0.5180518188476563, 0.5180508422851563, 0.5179484252929687, 0.5180457153320313, 0.5182883911132813, 0.5182054443359375, 0.5181962280273438, 0.5179832153320313, 0.5181480712890625, 0.518150146484375, 0.5184532470703125, 0.5180272827148438, 0.5180825805664062, 0.518097900390625, 0.5182258911132812, 0.5182105712890624, 0.5181552734375, 0.5183569946289063, 0.518529052734375, 0.518128662109375, 0.5181829223632812, 0.5179422607421875, 0.5181511840820312, 0.518192138671875, 0.5183323974609375, 0.5181368408203125, 0.5180845947265625, 0.5180733642578125, 0.5180211181640625, 0.5182935180664062, 0.5184542846679687, 0.5183917846679688, 0.5182208251953125, 0.5182781372070312, 0.5181317138671875, 0.5180057373046875, 0.51827099609375, 0.5181931762695312, 0.5183539428710937, 0.5180200805664062, 0.5180907592773437, 0.518160400390625, 0.5181306762695312, 0.518550537109375, 0.5182576904296875, 0.5182935180664062, 0.5190768432617188, 1.0802135009765625, 0.5184102172851562, 0.5183016967773437, 0.5183057861328125, 0.518302734375, 0.5182258911132812, 0.5182843017578125, 0.5182535400390625, 0.5182894287109375, 0.5180989379882812, 0.5188106079101562, 0.5188167724609375, 0.5182730102539063, 0.5184215087890625, 0.5188740844726563, 0.518466552734375, 0.5184788208007812, 0.5185781860351563, 0.518593505859375, 0.5185474853515625, 0.5185228881835937, 0.5185556640625, 0.5184358520507812, 0.5184542846679687, 0.5182996215820312, 0.518150146484375, 0.5181685791015626, 0.5181951904296875, 0.5183805541992188, 0.5179638061523437, 0.5181430053710937, 0.5183969116210938, 0.518614013671875, 0.5185802001953125, 0.518518798828125, 0.5182832641601562, 0.5184378662109375, 0.5184542846679687, 0.5183897705078125, 0.5185802001953125, 0.5184993286132813, 0.518413330078125, 0.518582275390625, 0.5183180541992187, 0.5183262939453125, 0.5186201782226563, 0.5186109619140625, 0.5183580322265625, 0.5184757690429688, 0.5184901123046874, 0.5186160888671875, 0.5187286987304688, 0.5188710327148438, 0.5184942016601563, 0.5181839599609375, 0.5181829223632812, 0.5181522216796876, 0.518096923828125, 0.5179955444335937, 0.5181552734375, 0.51811328125, 0.5180682373046875, 0.5181757202148437, 1.07947314453125, 0.5179699096679687, 0.5179678955078125, 0.51793408203125, 0.5179218139648437, 
0.5176954956054688, 0.517823486328125, 0.517739501953125, 0.51822900390625, 0.5179279174804687, 0.517939208984375, 0.5180466918945312, 0.517918701171875, 0.5178296508789062, 0.5179985961914062, 0.5177743530273438, 0.5179566040039062, 0.5178408813476563, 0.5179791259765625, 0.5178828735351563, 0.5179432983398438, 0.5181634521484375, 0.5181337890625, 0.5179771118164063, 0.5182044067382813, 0.51795556640625, 0.5179996337890626, 0.5179801635742187, 0.5180671997070313, 0.518134765625, 0.5181143188476562, 0.5187368774414063, 0.5187942504882812, 0.5189273681640625, 0.5193850708007812, 0.5187921752929687, 0.5189058837890625, 0.5189734497070313, 0.5189846801757813, 0.5192017822265625, 0.5188218994140625, 0.5181880493164063, 0.5183334350585938, 0.518235107421875, 0.5181430053710937, 0.5183231811523438, 0.5186498413085937, 0.5190901489257812, 0.5187317504882812, 0.51865087890625, 0.5185198364257813, 0.5186201782226563, 0.5188372192382813, 0.51859765625, 0.518645751953125, 0.5186754760742187, 0.518487060546875, 0.5186324462890625, 0.5185587158203125, 0.5188239135742188, 0.5186519165039063, 0.5187880859375, 0.5186846923828125, 1.0807930908203125, 0.51770166015625, 0.51816650390625, 0.5179269409179688, 0.518044677734375, 0.5178726196289063, 0.5178777465820312, 0.5178327026367188, 0.5186160888671875, 0.5184112548828125, 0.5178265380859375, 0.5183109130859375, 0.518076416015625, 0.5183846435546875, 0.5180590209960938, 0.5186488037109375, 0.5181992797851562, 0.5183733520507813, 0.5187184448242188, 0.5183948974609375, 0.5185996704101562, 0.518761474609375, 0.518898681640625, 0.5185392456054687, 0.5188966674804687, 0.51905126953125, 0.519035888671875, 0.5190471801757812, 0.5186109619140625, 0.5184972534179687, 0.5185576782226563, 0.5184890747070312, 0.518635498046875, 0.5185792236328125, 0.5188178100585937, 0.5181685791015626, 0.51841845703125, 0.5182699584960937, 0.5182064819335938, 0.518287353515625, 0.5185095825195313, 0.5183477783203125, 0.5183303833007813, 0.51831396484375, 0.5182330932617187, 0.51831396484375, 0.5185115966796875, 0.5181265869140625, 0.5181378784179688, 0.5181726684570312, 0.5181337890625, 0.5182105712890624, 0.51820849609375, 0.518345703125, 0.51832421875, 0.5183549194335938, 0.518307861328125, 0.5184307250976562, 0.5182935180664062, 0.5186068725585937, 0.5185156860351563, 0.5184860229492188, 0.5183590698242188, 1.079120849609375, 0.5180108642578125, 0.5178880004882812, 0.5183908081054688, 0.5182914428710937, 0.5184788208007812, 0.5184235229492188, 0.518445068359375, 0.518530029296875, 0.51832421875, 0.5184675903320313, 0.51827099609375, 0.5184603881835937, 0.5187942504882812, 0.5182791748046875, 0.518677490234375, 0.51831396484375, 0.5182689208984375, 0.5182883911132813, 0.5182525634765625, 0.5183006591796875, 0.5184860229492188, 0.51837646484375, 0.5183355102539062, 0.5185433349609375, 0.5184737548828126, 0.518582275390625, 0.5182945556640625, 0.5185679321289063, 0.5183539428710937, 0.5182320556640625, 0.5178674926757812, 0.5180487670898437, 0.5178419189453125, 0.518265869140625, 0.5178624267578125, 0.5180108642578125, 0.5179617309570312, 0.5181962280273438, 0.5183150024414063, 0.5182945556640625, 0.51801806640625, 0.5179739990234375, 0.5180282592773438, 0.5179627685546875, 0.5179453735351562, 0.5180723266601562, 0.5182699584960937, 0.5180877075195313, 0.5185106201171875, 0.51859765625, 0.5188925170898437, 0.5190215454101562, 0.51888232421875, 0.5181071166992187, 0.5179473876953125, 0.5181839599609375, 0.5182371826171875, 0.5180457153320313, 0.5183866577148437, 0.5179750366210938, 
0.518129638671875, 0.51801806640625, 1.07964208984375, 0.5178245239257813, 0.51821875, 0.5180845947265625, 0.5184706420898437, 0.5179371337890625, 0.5181102294921875, 0.5177528076171874, 0.5178818359375, 0.5179115600585937, 0.5181040649414063, 0.51799755859375, 0.5179422607421875, 0.517833740234375, 0.5180364990234375, 0.5180262451171875, 0.5184931640625, 0.518465576171875, 0.518752197265625, 0.5185177612304688, 0.5182627563476563, 0.51831396484375, 0.5179750366210938, 0.5179299926757812, 0.518255615234375, 0.5179238891601563, 0.5180384521484375, 0.5178255615234375, 0.5182105712890624, 0.5178951416015625, 0.5179678955078125, 0.5183211669921876, 0.518451171875, 0.5179801635742187, 0.5185515747070313, 0.518012939453125, 0.5183160400390625, 0.5181572875976562, 0.518414306640625, 0.51803955078125, 0.5180856323242188, 0.5184501953125, 0.5183150024414063, 0.5181255493164062, 0.517992431640625, 0.5180579833984374, 0.5187839965820312, 0.5185628051757812, 0.518540283203125, 0.518202392578125, 0.518066162109375, 0.5179484252929687, 0.5179596557617188, 0.518540283203125, 0.5186969604492188, 0.518518798828125, 0.518950927734375, 0.5188731079101563, 0.5186795654296875, 0.5185914916992187, 0.5187307739257813, 0.5183908081054688, 0.51898876953125, 1.0808607177734375, 0.5182371826171875, 0.5184461059570312, 0.5177640991210938, 0.5177876586914063, 0.517739501953125, 0.5178665161132813, 0.5178562622070313, 0.5178859252929687, 0.5180989379882812, 0.5182730102539063, 0.5177979125976563, 0.5178357543945312, 0.5179658203125, 0.517981201171875, 0.517939208984375, 0.5179945068359375, 0.5179227905273438, 0.518240234375, 0.5178214111328125, 0.5179320068359375, 0.5179780883789062, 0.5183355102539062, 0.5180651245117187, 0.5181480712890625, 0.5180057373046875, 0.5182074584960937, 0.5179883422851562, 0.518451171875, 0.5178818359375, 0.5180845947265625, 0.5179965209960937, 0.51812353515625, 0.5180364990234375, 0.5182320556640625, 0.5181358032226563, 0.5180743408203125, 0.5179351196289063, 0.518034423828125, 0.51783984375, 0.5181317138671875, 0.518181884765625, 0.5181368408203125, 0.518096923828125, 0.5181122436523438, 0.5180252075195313, 0.5180845947265625, 0.5183109130859375, 0.5181265869140625, 0.5181849365234374, 0.5181675415039062, 0.51801904296875, 0.5181522216796876, 0.5180067749023437, 0.5182208251953125, 0.5181409301757812, 0.5182740478515625, 0.5183969116210938, 0.5182013549804687, 0.5183569946289063, 0.518540283203125, 0.5182197875976563, 0.5181859741210938, 1.08067431640625, 0.5179432983398438, 0.5182156982421875, 0.5180016479492188, 0.5179719848632812, 0.51789208984375, 0.5180518188476563, 0.5180037231445312, 0.5178480834960938, 0.5180877075195313, 0.5180457153320313, 0.517917724609375, 0.5178644409179688, 0.5178849487304688, 0.518024169921875, 0.518054931640625, 0.5180067749023437, 0.5178900756835938, 0.5179576416015625, 0.5179576416015625, 0.5179586791992188, 0.5180856323242188, 0.5181337890625, 0.5180282592773438, 0.5181214599609375, 0.5182494506835937, 0.518319091796875, 0.5179115600585937, 0.5183477783203125, 0.5183272705078125, 0.5182802124023438, 0.5183600463867187, 0.5181358032226563, 0.5181696166992188, 0.518255615234375, 0.5183529052734375, 0.5181572875976562, 0.5181859741210938, 0.5181696166992188, 0.51808154296875, 0.51820849609375, 0.5184255981445313, 0.5182371826171875, 0.5183518676757812, 0.5184041137695312, 0.5182156982421875, 0.518381591796875, 0.5181542358398438, 0.5183303833007813, 0.5183754272460938, 0.5185751342773437, 0.5181276245117188, 0.51801904296875, 0.517982177734375, 
0.5183969116210938, 0.5180518188476563, 0.5181931762695312, 0.5182013549804687, 0.51814501953125, 0.5182044067382813, 0.5184788208007812, 0.5181685791015626, 0.5182545776367188, 1.0810133056640625, 0.5179525146484375, 0.5181378784179688, 0.5180538940429688, 0.51814501953125, 0.517855224609375, 0.5180466918945312, 0.5178275756835937, 0.5180252075195313, 0.51799755859375, 0.5179699096679687, 0.5178880004882812, 0.5178541870117187, 0.5178347778320312, 0.51806103515625, 0.5181675415039062, 0.5181859741210938, 0.5180692749023438, 0.5179873046875, 0.5182177124023437, 0.518108154296875, 0.5182166748046875, 0.5182371826171875, 0.5179218139648437, 0.5183621215820312, 0.5186570434570312, 0.5182105712890624, 0.5181808471679688, 0.5182515258789062, 0.5181306762695312, 0.5180211181640625, 0.5178193969726562, 0.5181091918945312, 0.5179535522460937, 0.5180016479492188, 0.5181306762695312, 0.518223876953125, 0.5179525146484375, 0.5181911010742187, 0.5179227905273438, 0.5183364868164062, 0.5182883911132813, 0.5184389038085937, 0.5180641479492187, 0.5185689697265625, 0.5180364990234375, 0.5181071166992187, 0.5180067749023437, 0.5183314208984375, 0.5180825805664062, 0.5181122436523438, 0.5179945068359375, 0.5179750366210938, 0.5179535522460937, 0.5183303833007813, 0.5182197875976563, 0.5182955322265625, 0.5183119506835937, 0.51829248046875, 0.518128662109375, 0.518055908203125, 0.5183754272460938, 0.5182371826171875]",tokens/s,1.9000975104953335,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-30b,huggyllama/llama-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,6386.765824,20902.838272,0.0,20256.391168,19273.711616,s,10,26.8813134765625,2.68813134765625,0.0041309885323175775,2.6865931396484375,2.691104760742187,2.6949006469726564,2.6979373559570314,"[2.68579150390625, 2.698696533203125, 2.6865712890625, 2.6859423828125, 2.686614990234375, 2.68409033203125, 2.68391357421875, 2.689296875, 2.690134765625, 2.69026123046875]",tokens/s,95.23344170782555,kWh,3.169923381672966e-05,1.7372348204735315e-05,0.00014952039739400956,0.00019859197941547455,tokens/kWh,1289075.222239575,MB,6390.304768,20902.838272,0.0,20256.391168,19862.692352,s,10,1586.219609375,158.62196093749998,0.021140987905695616,158.62285937500002,158.64970156249998,158.65553046875,158.66019359375,"[158.621765625, 158.64840625, 158.661359375, 158.62828125, 158.623953125, 158.628453125, 158.605140625, 158.5859375, 158.5989375, 158.617375]",tokens/s,0.39717072987657226,kWh,0.0018725793671773541,0.001026339254797931,0.00885864822580179,0.011757566847777077,tokens/kWh,5358.251483121356,,s,629,1607.8922929687508,2.55626755638911,0.31948633074222244,2.517612548828125,2.519399658203125,2.519920654296875,5.205208671875,"[2.518139892578125, 2.51872265625, 2.517171142578125, 2.517675048828125, 2.517256103515625, 2.51740869140625, 2.51825048828125, 2.5169521484375, 2.517097412109375, 2.516339599609375, 2.5180908203125, 2.51845947265625, 2.51869189453125, 2.518560791015625, 2.519083984375, 
2.517611572265625, 2.517076904296875, 2.5172490234375, 2.518533203125, 2.518801513671875, 2.5181748046875, 2.51648193359375, 2.517393310546875, 2.516537353515625, 2.517125, 2.516431884765625, 2.518256591796875, 2.516644775390625, 2.5168720703125, 2.516345947265625, 2.51714453125, 2.516572265625, 2.516453369140625, 2.516675537109375, 2.517611572265625, 2.516471923828125, 2.516367431640625, 2.517232666015625, 2.517244873046875, 2.51826904296875, 2.517273681640625, 2.51704638671875, 2.5178828125, 2.51706884765625, 2.51754296875, 2.518129638671875, 2.519542724609375, 2.5176494140625, 2.517353515625, 2.51711279296875, 2.519594970703125, 2.52010693359375, 2.51871435546875, 2.519090087890625, 2.519048095703125, 2.519532470703125, 2.51997802734375, 2.518859619140625, 2.51943017578125, 2.520383544921875, 2.517666748046875, 2.51677392578125, 5.2204013671875, 2.5192919921875, 2.51740771484375, 2.516737060546875, 2.5188486328125, 2.5170791015625, 2.516949951171875, 2.517423095703125, 2.517424072265625, 2.51706787109375, 2.517665771484375, 2.519553955078125, 2.51833642578125, 2.519075927734375, 2.519300048828125, 2.51761962890625, 2.519573486328125, 2.518274169921875, 2.517041259765625, 2.5171435546875, 2.516961181640625, 2.517271484375, 2.516989013671875, 2.52028515625, 2.518149169921875, 2.5176474609375, 2.517328857421875, 2.51751220703125, 2.51903271484375, 2.51806005859375, 2.51780810546875, 2.51837548828125, 2.519371826171875, 2.517011474609375, 2.519920654296875, 2.5178798828125, 2.517094482421875, 2.51761669921875, 2.51815625, 2.51747216796875, 2.516791259765625, 2.5166181640625, 2.517612548828125, 2.51765576171875, 2.51951513671875, 2.516619384765625, 2.519645263671875, 2.518657958984375, 2.516588623046875, 2.519201904296875, 2.51913720703125, 2.518130615234375, 2.517403564453125, 2.5184736328125, 2.519552001953125, 2.523171875, 2.51789013671875, 2.516390869140625, 2.5189345703125, 2.518498291015625, 2.519341064453125, 2.5183642578125, 2.519233642578125, 5.20647900390625, 2.5165556640625, 2.518474853515625, 2.51786962890625, 2.51740478515625, 2.517843994140625, 2.51732177734375, 2.51726123046875, 2.516327392578125, 2.517376953125, 2.518192138671875, 2.519117919921875, 2.518830078125, 2.516476806640625, 2.51763916015625, 2.517055419921875, 2.517540771484375, 2.51723681640625, 2.519719970703125, 2.51848095703125, 2.517832763671875, 2.516706298828125, 2.51780517578125, 2.518971435546875, 2.5205986328125, 2.51799853515625, 2.51905029296875, 2.52092919921875, 2.517274658203125, 2.51875634765625, 2.5179462890625, 2.51730126953125, 2.51972802734375, 2.519468017578125, 2.518427734375, 2.5177353515625, 2.517422119140625, 2.5163857421875, 2.51761767578125, 2.517665771484375, 2.52023291015625, 2.51808154296875, 2.517843017578125, 2.52073974609375, 2.519414794921875, 2.51922216796875, 2.518572021484375, 2.519636962890625, 2.520416259765625, 2.518666259765625, 2.522156005859375, 2.518053955078125, 2.519404541015625, 2.518847412109375, 2.517646240234375, 2.51810205078125, 2.51926123046875, 2.519097412109375, 2.521001953125, 2.518910888671875, 2.5173779296875, 2.517789794921875, 2.520220703125, 5.20475830078125, 2.51871240234375, 2.5201982421875, 2.520436767578125, 2.51780908203125, 2.516747314453125, 2.51753271484375, 2.516557861328125, 2.51795458984375, 2.51775390625, 2.518048828125, 2.517761962890625, 2.518373291015625, 2.51766064453125, 2.517230712890625, 2.516822021484375, 2.51799755859375, 2.5179033203125, 2.5168720703125, 2.518266845703125, 2.517353515625, 2.5180712890625, 2.518619140625, 
2.516643798828125, 2.517336181640625, 2.517548095703125, 2.517536865234375, 2.517360595703125, 2.517929931640625, 2.517650390625, 2.517623779296875, 2.518117431640625, 2.51952734375, 2.51759521484375, 2.517037109375, 2.517159912109375, 2.51675537109375, 2.518679443359375, 2.517864501953125, 2.51694189453125, 2.517684326171875, 2.519645263671875, 2.518003662109375, 2.517291015625, 2.516994140625, 2.51759912109375, 2.5174609375, 2.51873388671875, 2.517940185546875, 2.51837841796875, 2.51715185546875, 2.5170146484375, 2.51955517578125, 2.518741943359375, 2.5189775390625, 2.5173525390625, 2.517677978515625, 2.51769140625, 2.51719580078125, 2.51702783203125, 2.5181552734375, 2.519466064453125, 2.519775146484375, 5.2051865234375, 2.517904296875, 2.51915869140625, 2.51858935546875, 2.519103515625, 2.519520263671875, 2.517877685546875, 2.517642333984375, 2.518906982421875, 2.51755419921875, 2.517623779296875, 2.517487548828125, 2.518580322265625, 2.518183837890625, 2.517307373046875, 2.516926513671875, 2.51662353515625, 2.519920654296875, 2.516429931640625, 2.518091796875, 2.5164921875, 2.5171865234375, 2.51658251953125, 2.517351318359375, 2.516623291015625, 2.517256103515625, 2.517856201171875, 2.517544921875, 2.516708251953125, 2.519246826171875, 2.518055908203125, 2.518032470703125, 2.516893798828125, 2.519097412109375, 2.517005126953125, 2.517876708984375, 2.5175634765625, 2.518494140625, 2.5167412109375, 2.5177138671875, 2.518095947265625, 2.51822998046875, 2.518499267578125, 2.516704345703125, 2.520115234375, 2.517539794921875, 2.5169130859375, 2.517727294921875, 2.517357666015625, 2.5173330078125, 2.518453369140625, 2.520203369140625, 2.517231689453125, 2.517478515625, 2.517189697265625, 2.517262451171875, 2.517794921875, 2.519332763671875, 2.517783447265625, 2.517359619140625, 2.51770458984375, 2.517918701171875, 2.5172275390625, 5.20521728515625, 2.518489013671875, 2.51708203125, 2.5163857421875, 2.51787060546875, 2.5188076171875, 2.518327392578125, 2.517561279296875, 2.519435302734375, 2.52038037109375, 2.517274658203125, 2.517645263671875, 2.517359619140625, 2.517127197265625, 2.516441162109375, 2.516781982421875, 2.518454345703125, 2.517274658203125, 2.51644921875, 2.51639306640625, 2.5190185546875, 2.5164248046875, 2.516654052734375, 2.5163837890625, 2.51848388671875, 2.516992919921875, 2.5170400390625, 2.51709033203125, 2.517877685546875, 2.516539306640625, 2.516367431640625, 2.51928173828125, 2.519300048828125, 2.51730029296875, 2.516928466796875, 2.516885498046875, 2.518531982421875, 2.5179638671875, 2.518365234375, 2.521257080078125, 2.5186845703125, 2.51814697265625, 2.518978515625, 2.518739013671875, 2.51789208984375, 2.517880859375, 2.5175458984375, 2.51723779296875, 2.520966064453125, 2.5173125, 2.517146728515625, 2.517482421875, 2.51751123046875, 2.518137939453125, 2.517230712890625, 2.51829150390625, 2.51913720703125, 2.51747216796875, 2.518295654296875, 2.51835693359375, 2.5180498046875, 2.51679443359375, 2.516978759765625, 5.20618505859375, 2.516756591796875, 2.516822021484375, 2.516474853515625, 2.517072998046875, 2.51698583984375, 2.517000244140625, 2.517266357421875, 2.51782666015625, 2.51772119140625, 2.518513671875, 2.518520751953125, 2.517814208984375, 2.516486083984375, 2.516232177734375, 2.51664892578125, 2.517416015625, 2.51725, 2.517421142578125, 2.51761767578125, 2.517550048828125, 2.517233642578125, 2.51706982421875, 2.517667724609375, 2.518662109375, 2.517381103515625, 2.5167001953125, 2.517783447265625, 2.51818603515625, 2.51659375, 2.51644921875, 
2.516968505859375, 2.51725927734375, 2.517319580078125, 2.51740673828125, 2.517291015625, 2.51770068359375, 2.516655029296875, 2.516579345703125, 2.518679443359375, 2.51792578125, 2.517526611328125, 2.518116455078125, 2.51873291015625, 2.517396484375, 2.517900390625, 2.51812255859375, 2.5177353515625, 2.5179853515625, 2.516726806640625, 2.5171015625, 2.517467041015625, 2.518310791015625, 2.51728173828125, 2.516833251953125, 2.51797412109375, 2.517671875, 2.518767578125, 2.51833251953125, 2.518369384765625, 2.519214111328125, 2.5175, 2.517960693359375, 5.207421875, 2.516853759765625, 2.517536865234375, 2.51687841796875, 2.516634521484375, 2.516380615234375, 2.51723681640625, 2.517261474609375, 2.51818798828125, 2.518327392578125, 2.518880126953125, 2.518677490234375, 2.517012451171875, 2.516658203125, 2.5166181640625, 2.51793505859375, 2.517747802734375, 2.519637939453125, 2.5187236328125, 2.518320068359375, 2.517434326171875, 2.516303955078125, 2.516716552734375, 2.516737060546875, 2.516611083984375, 2.516365234375, 2.51681591796875, 2.516877197265625, 2.517065673828125, 2.516994140625, 2.517845947265625, 2.518003662109375, 2.517454833984375, 2.51677685546875, 2.517212158203125, 2.517548095703125, 2.51734228515625, 2.51691015625, 2.5173115234375, 2.51702587890625, 2.51687109375, 2.517181396484375, 2.517095458984375, 2.517434326171875, 2.518919189453125, 2.51658447265625, 2.516896728515625, 2.51681787109375, 2.51647900390625, 2.5165732421875, 2.516769775390625, 2.51728271484375, 2.516929443359375, 2.516444091796875, 2.5165341796875, 2.517580810546875, 2.51643798828125, 2.51633984375, 2.51785302734375, 2.517928955078125, 2.516727783203125, 2.51658544921875, 2.516779052734375, 5.207884765625, 2.51725, 2.51802001953125, 2.517275634765625, 2.516849609375, 2.516008056640625, 2.516381591796875, 2.51746826171875, 2.51627734375, 2.516135986328125, 2.5164912109375, 2.5164912109375, 2.516265869140625, 2.5161103515625, 2.516736083984375, 2.51685693359375, 2.51656396484375, 2.51710986328125, 2.517779541015625, 2.51814697265625, 2.517348388671875, 2.516961181640625, 2.518197265625, 2.51780810546875, 2.518084716796875, 2.518426513671875, 2.517813232421875, 2.517560302734375, 2.51685693359375, 2.51721728515625, 2.516894775390625, 2.517078125, 2.516939697265625, 2.516675537109375, 2.516822021484375, 2.516694091796875, 2.51648828125, 2.516349853515625, 2.51686083984375, 2.51789404296875, 2.516864013671875, 2.516809814453125, 2.516926513671875, 2.51690185546875, 2.51774462890625, 2.518223876953125, 2.51867236328125, 2.520320068359375, 2.518391845703125, 2.518958984375, 2.5195068359375, 2.51765771484375, 2.51833251953125, 2.519100341796875, 2.51811328125, 2.5185771484375, 2.51877587890625, 2.5190849609375, 2.51871337890625, 2.517376953125, 2.516873291015625, 2.516703125, 2.517409912109375, 5.20814892578125, 2.516905029296875, 2.517455810546875, 2.51675341796875, 2.5166611328125, 2.5164248046875, 2.51766064453125, 2.516621337890625, 2.516347900390625, 2.51616455078125, 2.5168916015625, 2.516533203125, 2.51630078125, 2.51683935546875, 2.5170791015625, 2.5168505859375, 2.51635498046875, 2.516509765625, 2.517519287109375, 2.517980224609375, 2.517612548828125, 2.51685888671875, 2.5184287109375, 2.517878662109375, 2.51812548828125, 2.51820849609375, 2.519699462890625, 2.51765771484375, 2.517099609375, 2.517208984375, 2.51761962890625, 2.5169130859375, 2.51656591796875, 2.5162998046875, 2.51715185546875, 2.517620849609375, 2.516823974609375, 2.516893798828125, 2.517906494140625, 2.517166259765625, 2.5167236328125, 
2.51692236328125, 2.51753271484375, 2.51745166015625, 2.516619384765625, 2.517034912109375, 2.51966259765625, 2.5194423828125, 2.519153564453125, 2.519391357421875, 2.519334716796875, 2.520791015625, 2.5193984375, 2.51885986328125, 2.518571044921875, 2.519627685546875, 2.519412841796875, 2.5196328125, 2.5182587890625, 2.516928466796875, 2.517813232421875, 2.518243408203125, 2.5184306640625]",tokens/s,0.3911953572702552,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,togethercomputer/RedPajama-INCITE-Base-3B-v1,togethercomputer/RedPajama-INCITE-Base-3B-v1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.1,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2293.178368,3364.356096,0.0,2717.908992,2483.907584,s,10,2.332126235961914,0.2332126235961914,0.0007025035151089087,0.23313691711425782,0.23428714752197263,0.2343353157043457,0.23437385025024415,"[0.2342764434814453, 0.23438348388671876, 0.23256541442871093, 0.2323934326171875, 0.23265362548828125, 0.23239085388183595, 0.23300813293457032, 0.2332657012939453, 0.2336903381347656, 0.23349880981445312]",tokens/s,1097.7107330316092,kWh,2.7429988588949646e-06,1.5025212867385563e-06,1.2985660641045106e-05,1.7231180786678625e-05,tokens/kWh,14856788.003635412,MB,2293.178368,3364.356096,0.0,2717.908992,2632.491008,s,10,135.513068359375,13.5513068359375,0.004490003668394658,13.54976904296875,13.55813466796875,13.559575634765624,13.560728408203124,"[13.557814453125, 13.5512802734375, 13.549751953125, 13.54661328125, 13.549029296875, 13.5610166015625, 13.5497861328125, 13.546083984375, 13.5524775390625, 13.54921484375]",tokens/s,4.64899811971836,kWh,0.0001599563909630583,8.766855071097778e-05,0.0007514645898075666,0.0009990895314816027,tokens/kWh,63057.411788284844,,s,629,137.39515400695814,0.21843426710168204,0.027695203958367918,0.2150584259033203,0.21541007690429687,0.21553008728027342,0.44768570190429685,"[0.215546875, 0.21509120178222657, 0.21484748840332032, 0.2146693115234375, 0.2148485107421875, 0.21482701110839844, 0.21496627807617188, 0.2148546600341797, 0.2148341827392578, 0.2148423614501953, 0.2148730926513672, 0.21502566528320313, 0.21523968505859375, 0.21587353515625, 0.2149601287841797, 0.2150584259033203, 0.21547929382324219, 0.21505638122558593, 0.21523866271972655, 0.21525401306152345, 0.2153605194091797, 0.21535232543945312, 0.21514854431152344, 0.21529600524902343, 0.2149396514892578, 0.21511885070800782, 0.21522738647460937, 0.21515980529785156, 0.21498982238769532, 0.21505433654785155, 0.21510963439941405, 0.21512602233886718, 0.2155325469970703, 0.21535232543945312, 0.215225341796875, 0.21511270141601563, 0.21541171264648437, 0.21544345092773437, 0.2153164825439453, 0.21536767578125, 0.2153871307373047, 0.21529087829589844, 0.21537893676757813, 0.21553868103027343, 0.21545779418945313, 0.21531546020507814, 0.21509529113769532, 0.2152816619873047, 0.21531954956054689, 0.2148720703125, 0.215088134765625, 0.2149949493408203, 0.21501849365234374, 0.21520999145507813, 0.2153052215576172, 0.21509529113769532, 0.21530111694335938, 
0.2151014404296875, 0.21536665344238282, 0.21565542602539062, 0.21544345092773437, 0.21517721557617187, 0.4497049560546875, 0.21492530822753905, 0.21484031677246093, 0.21474610900878907, 0.21518130493164062, 0.21497036743164064, 0.2149539794921875, 0.2148833312988281, 0.21501951599121094, 0.21493760681152344, 0.21485568237304686, 0.21516493225097658, 0.21500518798828125, 0.2150102996826172, 0.2150328369140625, 0.21521510314941406, 0.2148863983154297, 0.21485055541992187, 0.21527859497070312, 0.21493862915039064, 0.21490380859375, 0.2149171142578125, 0.21504307556152344, 0.21492428588867188, 0.21488844299316406, 0.21539634704589844, 0.21504307556152344, 0.2151526336669922, 0.21495603942871094, 0.21498675537109374, 0.21492019653320313, 0.21486285400390626, 0.21517312622070311, 0.2152755126953125, 0.21528883361816406, 0.21505433654785155, 0.21547929382324219, 0.21528985595703126, 0.21551922607421875, 0.21516184997558593, 0.21498265075683592, 0.21506048583984375, 0.21529702758789063, 0.21518438720703126, 0.21498880004882812, 0.21501747131347657, 0.21603123474121094, 0.21503077697753906, 0.21511576843261718, 0.21513523864746092, 0.21525605773925782, 0.21546394348144532, 0.2150963134765625, 0.21497445678710939, 0.2149396514892578, 0.2152611846923828, 0.21501338195800782, 0.2153605194091797, 0.21494989013671875, 0.2149775390625, 0.21501235961914061, 0.21545472717285155, 0.21544960021972656, 0.4477255554199219, 0.21488946533203124, 0.2147993621826172, 0.214830078125, 0.21490687561035157, 0.21492019653320313, 0.21471743774414062, 0.21540556335449218, 0.21555815124511718, 0.2154772491455078, 0.21546086120605468, 0.21494784545898438, 0.21496524047851562, 0.2147747802734375, 0.21512191772460937, 0.215014404296875, 0.214935546875, 0.21500314331054687, 0.21485055541992187, 0.21497445678710939, 0.2149775390625, 0.21503897094726562, 0.21483314514160157, 0.2149283905029297, 0.21502053833007811, 0.21528678894042969, 0.2153553924560547, 0.21509735107421876, 0.21505229187011718, 0.2149539794921875, 0.21523455810546874, 0.2149099578857422, 0.21484339904785157, 0.21541888427734374, 0.21507379150390624, 0.21519564819335937, 0.2150328369140625, 0.21511065673828125, 0.21497343444824218, 0.2153123779296875, 0.21532876586914063, 0.21510963439941405, 0.21525709533691406, 0.21491506958007814, 0.2153175048828125, 0.21504818725585936, 0.2149048309326172, 0.21543218994140625, 0.21501951599121094, 0.21494886779785155, 0.21495603942871094, 0.21506764221191407, 0.21505433654785155, 0.21506150817871095, 0.21549261474609374, 0.2151208953857422, 0.21526220703125, 0.21505331420898438, 0.21497036743164064, 0.21506048583984375, 0.2150440979003906, 0.21503590393066407, 0.21496934509277343, 0.4474736633300781, 0.21475225830078126, 0.2146570281982422, 0.21483314514160157, 0.2149171142578125, 0.21528678894042969, 0.21490380859375, 0.21523353576660156, 0.2151004180908203, 0.214935546875, 0.21490176391601562, 0.21502362060546876, 0.21514854431152344, 0.21500006103515626, 0.21510861206054688, 0.21527040100097655, 0.21625958251953126, 0.2149365692138672, 0.2149539794921875, 0.21506150817871095, 0.21505638122558593, 0.21492326354980468, 0.21511167907714843, 0.21502362060546876, 0.21509120178222657, 0.21505433654785155, 0.21530931091308594, 0.2149396514892578, 0.2147993621826172, 0.21485568237304686, 0.2149283905029297, 0.214972412109375, 0.2149918670654297, 0.214898681640625, 0.21487820434570312, 0.21496421813964844, 0.21520281982421874, 0.214972412109375, 0.2149775390625, 0.21498675537109374, 0.2149212188720703, 0.21492941284179687, 
0.21481471252441406, 0.21471128845214843, 0.21537791442871093, 0.21497856140136717, 0.21484442138671875, 0.21496421813964844, 0.21498573303222657, 0.215046142578125, 0.21490176391601562, 0.2149591064453125, 0.2149529571533203, 0.2151034851074219, 0.21514137268066405, 0.2150768585205078, 0.21502259826660156, 0.21512191772460937, 0.2151628875732422, 0.2150584259033203, 0.21498573303222657, 0.21492019653320313, 0.21532365417480467, 0.4478924865722656, 0.21483724975585938, 0.21483314514160157, 0.21480447387695312, 0.21494476318359376, 0.21484339904785157, 0.2147061767578125, 0.214793212890625, 0.21511167907714843, 0.2152048645019531, 0.2148833312988281, 0.21500518798828125, 0.21489459228515626, 0.21502464294433593, 0.2147235870361328, 0.21478810119628905, 0.2147553253173828, 0.21490176391601562, 0.2149591064453125, 0.21520588684082032, 0.21484646606445312, 0.2149109802246094, 0.21492633056640625, 0.21500006103515626, 0.21491302490234376, 0.21484133911132813, 0.2149908447265625, 0.21542912292480468, 0.2149601287841797, 0.21523455810546874, 0.21497958374023438, 0.21499288940429687, 0.21481471252441406, 0.21496524047851562, 0.21497343444824218, 0.21500518798828125, 0.21537484741210938, 0.21541786193847656, 0.21500723266601562, 0.21525605773925782, 0.21530111694335938, 0.21528472900390624, 0.21499903869628906, 0.21505126953125, 0.2150266876220703, 0.21561138916015626, 0.21505638122558593, 0.21511680603027344, 0.2150707244873047, 0.21535232543945312, 0.2149775390625, 0.21508709716796875, 0.21489561462402343, 0.21500825500488283, 0.21495603942871094, 0.21576191711425782, 0.2151034851074219, 0.21574656677246093, 0.21505946350097657, 0.21543321228027343, 0.21538304138183595, 0.21532261657714843, 0.21520895385742186, 0.44758322143554685, 0.21562471008300782, 0.2151034851074219, 0.2150707244873047, 0.21508096313476563, 0.21512396240234374, 0.2149396514892578, 0.21493350219726562, 0.21506355285644532, 0.21523353576660156, 0.21518130493164062, 0.21520281982421874, 0.21525914001464844, 0.2150891571044922, 0.21512294006347657, 0.21514035034179688, 0.21520384216308594, 0.2151761932373047, 0.21500518798828125, 0.21573017883300782, 0.2152489013671875, 0.2153871307373047, 0.21496524047851562, 0.21487718200683595, 0.2150440979003906, 0.21491404724121094, 0.21495706176757812, 0.2153359375, 0.2152611846923828, 0.2150963134765625, 0.21533287048339844, 0.21510861206054688, 0.2150645751953125, 0.21524787902832032, 0.21535334777832033, 0.21540966796875, 0.2150584259033203, 0.21552639770507812, 0.21640089416503908, 0.21548133850097656, 0.2153297882080078, 0.21563392639160156, 0.21536256408691407, 0.21525299072265625, 0.21546290588378905, 0.21525094604492187, 0.21537382507324218, 0.2153553924560547, 0.2156943359375, 0.21538815307617187, 0.21539328002929686, 0.21541273498535157, 0.21524479675292968, 0.2151137237548828, 0.21520179748535156, 0.21540045166015626, 0.21538201904296875, 0.21517106628417967, 0.21503181457519532, 0.21518438720703126, 0.2152314910888672, 0.21534002685546874, 0.2152857666015625, 0.44831845092773437, 0.2153543701171875, 0.21508607482910155, 0.2153891906738281, 0.21522738647460937, 0.2152263641357422, 0.2151383056640625, 0.2148311004638672, 0.2147604522705078, 0.21474508666992187, 0.2151946258544922, 0.21490176391601562, 0.2149283905029297, 0.2148341827392578, 0.21475634765625, 0.21509426879882812, 0.21515367126464843, 0.2148720703125, 0.2148106231689453, 0.21511576843261718, 0.2154588165283203, 0.21494169616699219, 0.21514752197265624, 0.21526629638671874, 0.21540658569335938, 0.2152980499267578, 
0.21505433654785155, 0.21525094604492187, 0.21514956665039062, 0.21498573303222657, 0.2155397186279297, 0.21500006103515626, 0.21513113403320314, 0.21502362060546876, 0.21486079406738282, 0.2149171142578125, 0.21485977172851561, 0.21519564819335937, 0.21494682312011718, 0.2151137237548828, 0.2152744903564453, 0.21504512023925781, 0.21480755615234376, 0.21494682312011718, 0.21507379150390624, 0.214935546875, 0.21486796569824218, 0.2152191925048828, 0.21546086120605468, 0.21495603942871094, 0.21510963439941405, 0.21517414855957032, 0.21489356994628905, 0.21515058898925782, 0.21519973754882812, 0.214898681640625, 0.21508709716796875, 0.2153492431640625, 0.2149160919189453, 0.21502873229980468, 0.21509735107421876, 0.21509426879882812, 0.21496217346191407, 0.4484382629394531, 0.21479731750488282, 0.2149713897705078, 0.2148546600341797, 0.21475328063964844, 0.21487615966796875, 0.21529498291015625, 0.21497856140136717, 0.2148106231689453, 0.21494169616699219, 0.21501849365234374, 0.21517312622070311, 0.21516390991210937, 0.2153175048828125, 0.21526835632324218, 0.21509222412109374, 0.21496832275390626, 0.2149171142578125, 0.21479423522949218, 0.21496421813964844, 0.21496115112304687, 0.21497958374023438, 0.21504205322265624, 0.2148106231689453, 0.21492633056640625, 0.21496832275390626, 0.2149160919189453, 0.2148863983154297, 0.21481266784667968, 0.21477171325683594, 0.21533900451660157, 0.21499903869628906, 0.21496115112304687, 0.2150102996826172, 0.2149918670654297, 0.2151761932373047, 0.21486285400390626, 0.215046142578125, 0.21498471069335937, 0.21511065673828125, 0.21503077697753906, 0.21505126953125, 0.2149713897705078, 0.21494169616699219, 0.2152499237060547, 0.21516184997558593, 0.21492428588867188, 0.21485261535644531, 0.215046142578125, 0.215372802734375, 0.21504512023925781, 0.215119873046875, 0.21498675537109374, 0.21502156066894532, 0.21487513732910157, 0.2149959716796875, 0.21509324645996095, 0.2150891571044922, 0.21487820434570312, 0.21541171264648437, 0.21502566528320313, 0.2151208953857422, 0.21514239501953125, 0.4488765563964844, 0.21482086181640625, 0.21482496643066407, 0.21509735107421876, 0.21479014587402342, 0.21473484802246093, 0.2146693115234375, 0.21500518798828125, 0.21510758972167968, 0.21496217346191407, 0.2148106231689453, 0.21478604125976564, 0.21548748779296875, 0.21496627807617188, 0.2150328369140625, 0.21530624389648437, 0.21493862915039064, 0.2148843536376953, 0.21493145751953124, 0.21505126953125, 0.214908935546875, 0.21497958374023438, 0.21564518737792968, 0.21497343444824218, 0.21515367126464843, 0.21530931091308594, 0.21535334777832033, 0.21521817016601563, 0.21496934509277343, 0.21508096313476563, 0.2152929229736328, 0.21582028198242187, 0.21554074096679687, 0.2151751708984375, 0.2154168395996094, 0.21572300720214843, 0.21506253051757812, 0.215151611328125, 0.21505229187011718, 0.2149959716796875, 0.21501235961914061, 0.21538406372070312, 0.2150758361816406, 0.2152110137939453, 0.2150707244873047, 0.21545472717285155, 0.21499903869628906, 0.21496832275390626, 0.2150277099609375, 0.21533900451660157, 0.21492633056640625, 0.21502464294433593, 0.21509837341308594, 0.21507994079589843, 0.21537997436523437, 0.21541375732421875, 0.21506764221191407, 0.21498982238769532, 0.21500108337402343, 0.21577011108398436, 0.21502975463867188, 0.2149222412109375, 0.21507481384277344, 0.4486348876953125, 0.21474919128417969, 0.21470719909667968, 0.21471743774414062, 0.2151700439453125, 0.21507994079589843, 0.2148239288330078, 0.21487820434570312, 0.21485568237304686, 
0.21500108337402343, 0.21522840881347657, 0.21483622741699218, 0.21483212280273437, 0.21519155883789062, 0.21522738647460937, 0.2150154266357422, 0.21518438720703126, 0.21501951599121094, 0.21515776062011718, 0.21541375732421875, 0.2148536376953125, 0.21507174682617186, 0.21507174682617186, 0.21522329711914062, 0.21565644836425782, 0.21497958374023438, 0.2154239959716797, 0.21499288940429687, 0.21505229187011718, 0.21485157775878908, 0.21501132202148437, 0.21518540954589843, 0.21500108337402343, 0.21493760681152344, 0.21507174682617186, 0.2150830078125, 0.2148730926513672, 0.21516697692871095, 0.21505946350097657, 0.21500314331054687, 0.21530419921875, 0.2153369598388672, 0.2151331787109375, 0.21504103088378906, 0.21514752197265624, 0.21487615966796875, 0.21510963439941405, 0.21521510314941406, 0.21491302490234376, 0.21509939575195314, 0.21494989013671875, 0.21516082763671876, 0.2149212188720703, 0.2151004180908203, 0.21516697692871095, 0.2149181365966797, 0.2149591064453125, 0.21501644897460936, 0.21512806701660156, 0.21530316162109375, 0.2154035186767578, 0.21518130493164062, 0.21506866455078125]",tokens/s,4.578036281892053,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,mistralai/Mixtral-8x7B-v0.1,mistralai/Mixtral-8x7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", 
line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,r,r,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/r/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = 
get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3058-335d8a3a6750fa877652f3c5;12e3bddd-6b77-4b91-84b7-0f3f154813fa) Repository Not Found for url: https://huggingface.co/r/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: r is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-12b,EleutherAI/pythia-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1531.768832,9676.783616,0.0,9030.336512,8583.572992,s,10,9.471071899414062,0.9471071899414062,0.0007865321197485112,0.9470617980957031,0.9481417907714844,0.9482434417724609,0.9483247625732422,"[0.9481192016601563, 0.9483450927734375, 0.9464827270507813, 0.9463027954101563, 0.9464989624023438, 0.9459486694335938, 0.9466808471679687, 0.9474427490234375, 0.9474686889648437, 0.9477821655273437]",tokens/s,270.2967549172948,kWh,1.1175276083175583e-05,6.1235719211254035e-06,5.30962293456365e-05,7.039507734993748e-05,tokens/kWh,3636617.9232591945,MB,1531.768832,9676.783616,0.0,9030.336512,8872.966144,s,10,564.2868125,56.42868125,0.005902619636999738,56.430064453125,56.434701171875,56.4354833984375,56.4361091796875,"[56.42066796875, 56.42100390625, 56.427828125, 56.43380078125, 56.436265625, 56.4306328125, 56.43452734375, 56.42949609375, 56.41955859375, 56.43303125]",tokens/s,1.116453523357858,kWh,0.0006659982151861745,0.00036502579988969956,0.003165337650954964,0.004196361666030838,tokens/kWh,15013.005316004865,,s,629,571.914466552734,0.9092439849804999,0.11250283527003516,0.8956805419921875,0.8961118286132812,0.8962535400390624,1.84229025390625,"[0.8955299682617187, 0.8956344604492188, 0.8952893676757813, 0.8954654541015625, 0.8952473754882813, 0.8954511108398437, 0.8954111938476562, 0.895388671875, 0.8953507690429687, 0.8954296264648437, 0.8954921264648438, 0.8958034057617188, 0.8957071533203125, 0.895331298828125, 0.89537841796875, 0.8954317016601563, 0.895688720703125, 0.8951869506835938, 0.89523095703125, 0.895341552734375, 0.8953385009765625, 0.89526171875, 0.8952105712890625, 0.8952554931640625, 0.8955637817382812, 0.8952135620117188, 0.895278076171875, 0.89569384765625, 0.8954439697265625, 0.8954368286132812, 0.8953395385742188, 0.895320068359375, 0.895224853515625, 0.8956580200195312, 0.8956251220703125, 0.895593505859375, 0.8955872802734375, 0.895393798828125, 0.89556591796875, 0.8955975341796875, 0.8953139038085938, 0.8952350463867188, 0.8954644775390626, 0.8952453002929688, 0.8950845336914063, 0.89560888671875, 0.8954244384765625, 0.8954317016601563, 0.8957972412109375, 0.8959805297851563, 0.8959723510742188, 0.896089111328125, 0.8958925170898437, 0.8958883666992188, 0.8961648559570312, 0.8958853149414062, 0.896374755859375, 0.896205810546875, 0.8961065063476562, 0.89630517578125, 0.89601025390625, 0.8961126098632812, 1.844611083984375, 0.8954337158203125, 0.89558837890625, 0.8955914306640625, 0.8959385375976563, 0.8956160278320312, 0.8950343627929688, 0.8949083862304688, 0.8952064208984375, 0.895373291015625, 0.8951787719726563, 0.895162353515625, 0.8951439208984375, 0.895182861328125, 0.895109130859375, 0.8953907470703125, 0.8957255859375, 0.8957091674804688, 0.8956385498046875, 0.8958136596679688, 0.8958320922851563, 0.8952658081054687, 0.8952637329101563, 0.8951807861328125, 0.8952391967773438, 
0.8951900024414062, 0.8952842407226562, 0.8955750122070313, 0.89556787109375, 0.8954613647460937, 0.895562744140625, 0.8955679321289063, 0.8955903930664062, 0.8953548583984375, 0.8952647705078125, 0.8954501342773438, 0.8954869995117187, 0.89586181640625, 0.8957409057617187, 0.8956713256835938, 0.8955576171875, 0.8954132690429687, 0.8955822143554687, 0.89568359375, 0.8954531860351562, 0.8953220825195313, 0.89735986328125, 0.8953026733398437, 0.8954552612304687, 0.8961781616210938, 0.8958126220703125, 0.8957644653320312, 0.8956436767578125, 0.8958248901367187, 0.896047119140625, 0.8958515014648437, 0.8958351440429687, 0.8958453979492188, 0.895657958984375, 0.8957132568359375, 0.8961444091796875, 0.8957071533203125, 0.8954951782226562, 1.842377685546875, 0.895404052734375, 0.8950650634765625, 0.8951746826171875, 0.8952135620117188, 0.895152099609375, 0.8950364379882813, 0.8954869995117187, 0.8951756591796876, 0.8951070556640625, 0.89539794921875, 0.8955360717773437, 0.8956303100585937, 0.895278076171875, 0.8953108520507812, 0.895466552734375, 0.8955247802734375, 0.8954808349609376, 0.89577880859375, 0.8958197631835938, 0.8954214477539062, 0.8957296752929688, 0.8957849731445312, 0.8969154663085938, 0.8955699462890625, 0.8955740356445312, 0.8955402221679688, 0.8957010498046875, 0.8956046752929687, 0.8959457397460937, 0.89588427734375, 0.895657958984375, 0.8959969482421875, 0.8955279541015625, 0.89569384765625, 0.8956375122070312, 0.8958167114257812, 0.89558837890625, 0.895529052734375, 0.89546337890625, 0.8958197631835938, 0.8958023681640624, 0.895668212890625, 0.895762451171875, 0.8955596923828125, 0.89588427734375, 0.8957982788085938, 0.896742431640625, 0.8959180297851562, 0.895889404296875, 0.8958699340820313, 0.8959774780273437, 0.8961423950195313, 0.8960029907226562, 0.895941650390625, 0.8961116333007813, 0.8960184326171875, 0.8960419921875, 0.8958904418945313, 0.8957849731445312, 0.8956774291992188, 0.8956559448242187, 0.8954009399414062, 1.84203369140625, 0.8953282470703126, 0.8955074462890625, 0.8958883666992188, 0.895805419921875, 0.895224853515625, 0.8955054321289062, 0.89569482421875, 0.8954593505859375, 0.895592529296875, 0.89609619140625, 0.89571533203125, 0.89652734375, 0.8958136596679688, 0.8957890625, 0.8957183837890625, 0.8957440185546875, 0.8959334106445312, 0.895752197265625, 0.8961761474609375, 0.8955248413085938, 0.896421875, 0.895963134765625, 0.895330322265625, 0.8955340576171875, 0.8957603759765626, 0.8954306640625, 0.8958146362304688, 0.895805419921875, 0.8954982299804688, 0.8955299682617187, 0.8953681640625, 0.8958136596679688, 0.8957706298828125, 0.89615869140625, 0.8956036987304687, 0.8954429321289062, 0.895578125, 0.8957962036132813, 0.8954183959960937, 0.8954849243164062, 0.89569384765625, 0.8957583618164062, 0.8959283447265625, 0.8975103759765625, 0.8956825561523437, 0.8957440185546875, 0.895594482421875, 0.8960829467773438, 0.8958392333984375, 0.8954531860351562, 0.8957625122070313, 0.8960316772460938, 0.8954849243164062, 0.8954644775390626, 0.8962181396484376, 0.895805419921875, 0.8961618041992188, 0.8958760986328125, 0.8960430297851563, 0.89603173828125, 0.8957348022460937, 0.8958709716796875, 1.8425006103515624, 0.8954132690429687, 0.8952770385742187, 0.8951552124023437, 0.8951265258789063, 0.8949452514648437, 0.8951756591796876, 0.8958238525390625, 0.895236083984375, 0.895140869140625, 0.8951900024414062, 0.8952207641601563, 0.895283203125, 0.8956876831054688, 0.8953272094726562, 0.89558837890625, 0.8958331298828125, 0.896849853515625, 0.8957357788085938, 
0.8959354858398437, 0.8956190795898438, 0.8957183837890625, 0.89544189453125, 0.8957081298828125, 0.8961085205078125, 0.8960030517578125, 0.895741943359375, 0.8960706787109375, 0.8960758056640625, 0.8956846313476563, 0.8959539794921875, 0.8957930908203126, 0.8956805419921875, 0.8956405639648437, 0.8957777709960938, 0.895805419921875, 0.8960061645507813, 0.8956876831054688, 0.8957982788085938, 0.8960235595703125, 0.896294921875, 0.8963491821289062, 0.8959744262695313, 0.8960379028320312, 0.8958555908203125, 0.8957880249023438, 0.8960327758789063, 0.8959774780273437, 0.8963082275390625, 0.8961085205078125, 0.8957849731445312, 0.8961433715820313, 0.896015380859375, 0.8959569702148438, 0.89592626953125, 0.895857666015625, 0.89588427734375, 0.8977633056640625, 0.8960972900390625, 0.8961024169921875, 0.8960040893554687, 0.8959642333984374, 0.895754150390625, 1.8420654296875, 0.895541259765625, 0.8955330810546875, 0.895551513671875, 0.8954542846679687, 0.89588525390625, 0.8957655639648437, 0.8955913696289063, 0.8954900512695313, 0.895973388671875, 0.8957081298828125, 0.895446044921875, 0.8953692016601562, 0.8954695434570312, 0.8955350952148438, 0.8955064086914063, 0.8957685546875, 0.895510498046875, 0.8955617065429687, 0.89569384765625, 0.8958258666992187, 0.8956436767578125, 0.8955709228515625, 0.8953487548828125, 0.895425537109375, 0.89541015625, 0.8953661499023438, 0.8959344482421875, 0.8957112426757813, 0.8955719604492187, 0.8957173461914063, 0.8952852783203125, 0.895425537109375, 0.8956323852539062, 0.8953405151367188, 0.8953170166015625, 0.8956170043945313, 0.89562109375, 0.8976466064453125, 0.8961249389648438, 0.8960225830078125, 0.8955401611328125, 0.8958443603515625, 0.8955822143554687, 0.8957726440429687, 0.8958177490234375, 0.8958822631835938, 0.8957255859375, 0.89603173828125, 0.8960040893554687, 0.8958167114257812, 0.8959232177734375, 0.8961556396484375, 0.895921142578125, 0.8957132568359375, 0.8960726928710937, 0.8958699340820313, 0.8956088256835938, 0.8956508178710938, 0.8958750610351562, 0.8959140014648438, 0.896184326171875, 0.8957747192382812, 1.8425538330078124, 0.8953139038085938, 0.8954296264648437, 0.8954317016601563, 0.8954019775390625, 0.8954296264648437, 0.895804443359375, 0.89565185546875, 0.8956016845703125, 0.8954736938476563, 0.8951910400390625, 0.8951807861328125, 0.8956160278320312, 0.8953865966796875, 0.895224853515625, 0.8951572265625, 0.895447021484375, 0.8951286010742188, 0.8952534790039063, 0.8959088745117187, 0.8953425903320312, 0.8954378051757812, 0.8953170166015625, 0.8966533203125, 0.8956589965820313, 0.8957174072265625, 0.8958781127929687, 0.8958924560546875, 0.89592626953125, 0.895657958984375, 0.8963993530273437, 0.8960133056640625, 0.8955350952148438, 0.895636474609375, 0.8958023681640624, 0.8959241943359375, 0.8960481567382812, 0.8959610595703125, 0.8962498779296875, 0.8960297241210937, 0.8959395751953125, 0.8961136474609375, 0.896173095703125, 0.8961300048828125, 0.8959856567382812, 0.8956856079101563, 0.8959078369140625, 0.8958648071289063, 0.8959365234375, 0.8959959106445312, 0.89580029296875, 0.8960338134765625, 0.8962744140625, 0.8961085205078125, 0.896195556640625, 0.8962867431640625, 0.8956589965820313, 0.896268310546875, 0.8962437133789063, 0.896083984375, 0.895952880859375, 0.8960859985351562, 0.8958607177734375, 1.8435286865234375, 0.8953579711914063, 0.8959723510742188, 0.895552490234375, 0.8962406616210937, 0.895710205078125, 0.8959600830078125, 0.895594482421875, 0.8955453491210937, 0.8953630981445313, 0.8957020263671875, 
0.8955637817382812, 0.895478759765625, 0.8956026611328125, 0.89554736328125, 0.8955330810546875, 0.8961474609375, 0.895688720703125, 0.8956405639648437, 0.89556787109375, 0.8954163208007813, 0.8957962036132813, 0.8957061157226562, 0.8955453491210937, 0.895515625, 0.8955586547851563, 0.8955166625976563, 0.8954368286132812, 0.8956876831054688, 0.8957532348632813, 0.8957552490234375, 0.89554638671875, 0.8954439697265625, 0.8958248901367187, 0.8957378540039063, 0.8956109008789063, 0.895552490234375, 0.8955668334960938, 0.8956426391601563, 0.8957552490234375, 0.8958330688476562, 0.8957849731445312, 0.8957276000976563, 0.895825927734375, 0.8963133544921875, 0.8962559814453125, 0.89609423828125, 0.8956958618164063, 0.8961679077148438, 0.8956907348632812, 0.8954450073242187, 0.8954439697265625, 0.8957511596679687, 0.8955791625976562, 0.8954685668945312, 0.8961618041992188, 0.8959201049804687, 0.8955299682617187, 0.8956661987304687, 0.8956989135742187, 0.8956763916015625, 0.895662109375, 0.8958924560546875, 1.8434007568359374, 0.8952268676757813, 0.8956057739257812, 0.8958668823242187, 0.8953005981445312, 0.8950661010742188, 0.8951449584960938, 0.8954685668945312, 0.8951981811523437, 0.8952791137695313, 0.8951910400390625, 0.895224853515625, 0.8951367797851563, 0.8953661499023438, 0.8954818725585938, 0.8955637817382812, 0.8956846313476563, 0.8963553466796875, 0.8955187377929688, 0.89537841796875, 0.8953743286132813, 0.895267822265625, 0.8952647705078125, 0.8953159790039062, 0.895520751953125, 0.8957470703125, 0.8953702392578125, 0.8953262329101562, 0.8954111938476562, 0.8956661987304687, 0.8953436279296875, 0.8954030151367187, 0.895283203125, 0.8952268676757813, 0.89514599609375, 0.8952166137695312, 0.8958832397460937, 0.89552587890625, 0.8954111938476562, 0.895561767578125, 0.8955135498046874, 0.895520751953125, 0.8956928100585938, 0.8956426391601563, 0.8954716186523437, 0.8955125732421875, 0.8955330810546875, 0.8961566772460937, 0.8961351928710938, 0.8959539184570312, 0.8957091674804688, 0.89584228515625, 0.8958197631835938, 0.8957470703125, 0.8957593383789062, 0.8957214965820313, 0.8955811767578125, 0.8955985717773437, 0.8961331176757813, 0.8957828979492187, 0.8960563354492187, 0.8956763916015625, 0.8957511596679687, 1.8439761962890624, 0.8954634399414062, 0.89548388671875, 0.8955084838867188, 0.89558837890625, 0.895562744140625, 0.8955801391601562, 0.895515625, 0.8953907470703125, 0.8954337158203125, 0.8955177001953125, 0.8955903930664062, 0.8959283447265625, 0.8955166625976563, 0.89547265625, 0.8954685668945312, 0.895752197265625, 0.895425537109375, 0.8954317016601563, 0.8954531860351562, 0.8953671875, 0.89560986328125, 0.8953211059570313, 0.8955269165039063, 0.8957849731445312, 0.8958924560546875, 0.8968284301757813, 0.8958668823242187, 0.8959989624023438, 0.8959027099609375, 0.8958955688476562, 0.8957982788085938, 0.8957511596679687, 0.895730712890625, 0.8959959106445312, 0.8958034057617188, 0.895724609375, 0.8958135375976563, 0.8960409545898438, 0.8961474609375, 0.8957860107421876, 0.8955709228515625, 0.8958484497070313, 0.89590576171875, 0.895847412109375, 0.895763427734375, 0.8960266723632813, 0.8961084594726563, 0.8961710205078125, 0.89615869140625, 0.8962139892578125, 0.895963134765625, 0.8957614135742188, 0.8957449951171875, 0.8958822631835938, 0.8960726928710937, 0.8957849731445312, 0.8957849731445312, 0.895599609375, 0.895626220703125, 0.8956661987304687, 0.8960829467773438, 0.8956692504882813]",tokens/s,1.09981480935664,,,,, 
4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-30b,facebook/opt-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,5945.012224,19933.954048,0.0,19287.506944,18376.2688,s,10,24.42857763671875,2.442857763671875,0.001982296286527604,2.4421226806640624,2.4459630615234373,2.446148400878906,2.4462966723632813,"[2.44311767578125, 2.441929443359375, 2.441054443359375, 2.44091455078125, 2.441591064453125, 2.440709716796875, 2.44231591796875, 2.446333740234375, 2.445921875, 2.444689208984375]",tokens/s,104.79529500530754,kWh,2.881985050108698e-05,1.5794228899012522e-05,0.00013673797050139935,0.00018135204990149886,tokens/kWh,1411619.0036950014,MB,5946.937344,19933.954048,0.0,19287.506944,18871.985152,s,10,1453.1662812499999,145.316628125,0.01677016451823821,145.3091796875,145.3392140625,145.34826328125,145.35550265625,"[145.31590625, 145.308921875, 145.3094375, 145.306953125, 145.3573125, 145.337203125, 145.304828125, 145.303359375, 145.319953125, 145.30240625]",tokens/s,0.4335360709430169,kWh,0.0017155244359870752,0.0009402592492990879,0.00808164824309121,0.010737431928377374,tokens/kWh,5867.324740238933,,s,629,1472.8442895507828,2.3415648482524345,0.29007180854476533,2.306472900390625,2.307840771484375,2.308159619140625,4.7478279296874994,"[2.30830078125, 2.30712939453125, 2.305491943359375, 2.305958984375, 2.306070556640625, 2.306164794921875, 2.306714599609375, 2.306167724609375, 2.30651708984375, 2.306343994140625, 2.306281494140625, 2.3058994140625, 2.306171875, 2.305965087890625, 2.3060673828125, 2.305934326171875, 2.306259033203125, 2.30677197265625, 2.30823828125, 2.3065693359375, 2.3062998046875, 2.3065107421875, 2.305712158203125, 2.3060634765625, 2.30664404296875, 2.30611865234375, 2.305919921875, 2.306153564453125, 2.30573876953125, 2.307145751953125, 2.307577880859375, 2.30742529296875, 2.3060234375, 2.306620361328125, 2.30689892578125, 2.306671630859375, 2.30721337890625, 2.306621337890625, 2.30584423828125, 2.306515869140625, 2.306025390625, 2.307158935546875, 2.307966064453125, 2.3080498046875, 2.30725927734375, 2.30751123046875, 2.30584326171875, 2.306826171875, 2.30643505859375, 2.306669677734375, 2.3066572265625, 2.306987060546875, 2.3064228515625, 2.30631640625, 2.3060048828125, 2.3066552734375, 2.306758544921875, 2.306385986328125, 2.306724853515625, 2.30693994140625, 2.3067626953125, 2.306996337890625, 4.747927734375, 2.3060458984375, 2.306315185546875, 2.305916015625, 2.30605615234375, 2.30666455078125, 2.306251708984375, 2.306472900390625, 2.306552734375, 2.30586376953125, 2.3063408203125, 2.306269287109375, 2.306483154296875, 2.3060849609375, 2.306608154296875, 2.306598876953125, 2.306947021484375, 2.306627685546875, 2.30656298828125, 2.306156494140625, 2.305994873046875, 2.305953857421875, 2.3070966796875, 2.307031005859375, 2.306716552734375, 2.30727587890625, 2.305818603515625, 2.30561181640625, 2.306303955078125, 2.30622607421875, 2.30601513671875, 2.306629638671875, 2.306018310546875, 2.307220458984375, 
2.307166259765625, 2.30818310546875, 2.30658251953125, 2.306062255859375, 2.3065908203125, 2.3057265625, 2.306356201171875, 2.30654150390625, 2.306720703125, 2.306249755859375, 2.307949462890625, 2.307349609375, 2.30730859375, 2.306469970703125, 2.306974609375, 2.30639501953125, 2.30609716796875, 2.30603369140625, 2.306250732421875, 2.306716552734375, 2.306428955078125, 2.3060244140625, 2.306080810546875, 2.306798583984375, 2.306280517578125, 2.306587646484375, 2.306314208984375, 2.3067607421875, 2.307210205078125, 4.74750048828125, 2.30618017578125, 2.306073486328125, 2.306304931640625, 2.3058740234375, 2.306080810546875, 2.3063828125, 2.30624560546875, 2.305426513671875, 2.306010009765625, 2.3058330078125, 2.306188232421875, 2.305746826171875, 2.30639501953125, 2.305995849609375, 2.305857421875, 2.305976318359375, 2.306387939453125, 2.307175537109375, 2.3076884765625, 2.30548486328125, 2.305490966796875, 2.3058544921875, 2.30586474609375, 2.306364501953125, 2.30583203125, 2.306301025390625, 2.306060302734375, 2.306914306640625, 2.308274169921875, 2.307685302734375, 2.3084912109375, 2.306903076171875, 2.30611669921875, 2.306157470703125, 2.30603466796875, 2.306026611328125, 2.30809912109375, 2.30824658203125, 2.30763623046875, 2.30674853515625, 2.306740234375, 2.30609521484375, 2.305721435546875, 2.30651806640625, 2.307560546875, 2.30752880859375, 2.306269287109375, 2.30721630859375, 2.306631591796875, 2.30655908203125, 2.3057724609375, 2.30613818359375, 2.306503662109375, 2.306617431640625, 2.307072021484375, 2.30647802734375, 2.305734619140625, 2.3066552734375, 2.306449462890625, 2.30620361328125, 2.30637158203125, 2.306595947265625, 4.74802880859375, 2.306186279296875, 2.305531982421875, 2.305669189453125, 2.306135986328125, 2.3058759765625, 2.306019287109375, 2.306431884765625, 2.306522216796875, 2.30567529296875, 2.3055830078125, 2.306220947265625, 2.30667578125, 2.306291748046875, 2.306946044921875, 2.30719287109375, 2.3062763671875, 2.30605517578125, 2.3061484375, 2.306335693359375, 2.30618115234375, 2.30607666015625, 2.30662646484375, 2.30637158203125, 2.30767724609375, 2.308170654296875, 2.30727978515625, 2.306090087890625, 2.3074775390625, 2.30679248046875, 2.308010009765625, 2.306175048828125, 2.3065908203125, 2.306902099609375, 2.307230712890625, 2.30744677734375, 2.307564453125, 2.306438232421875, 2.306532470703125, 2.306220947265625, 2.306926513671875, 2.30791162109375, 2.30597021484375, 2.30626416015625, 2.306064453125, 2.305848388671875, 2.30631005859375, 2.305681396484375, 2.306680908203125, 2.3066787109375, 2.306912353515625, 2.30727587890625, 2.30600390625, 2.306438232421875, 2.306324462890625, 2.305838134765625, 2.305875, 2.305974365234375, 2.30620166015625, 2.306227294921875, 2.3058759765625, 2.306018310546875, 2.306186279296875, 4.7475712890625, 2.3056015625, 2.305901611328125, 2.30658251953125, 2.307598388671875, 2.307072998046875, 2.30600390625, 2.308192138671875, 2.307829833984375, 2.30786865234375, 2.307883056640625, 2.306185302734375, 2.305741943359375, 2.30852392578125, 2.307919921875, 2.30782763671875, 2.30862841796875, 2.308886474609375, 2.308350830078125, 2.308662353515625, 2.308884521484375, 2.30721337890625, 2.307564453125, 2.307115966796875, 2.307040283203125, 2.30601025390625, 2.307072998046875, 2.305795166015625, 2.3058740234375, 2.306260009765625, 2.3076474609375, 2.3075, 2.305462158203125, 2.305919921875, 2.306641845703125, 2.308442138671875, 2.30645654296875, 2.306227294921875, 2.306621337890625, 2.306598876953125, 2.3067822265625, 2.306314208984375, 
2.307274658203125, 2.306165771484375, 2.30636962890625, 2.306336669921875, 2.3068681640625, 2.307556396484375, 2.307939208984375, 2.306641845703125, 2.308420654296875, 2.30788720703125, 2.30803857421875, 2.307458984375, 2.308116455078125, 2.3085107421875, 2.30814306640625, 2.309021728515625, 2.30691845703125, 2.3075400390625, 2.30881787109375, 2.30786962890625, 2.308601806640625, 4.747927734375, 2.30775, 2.307747802734375, 2.306754638671875, 2.30704638671875, 2.30609521484375, 2.305871826171875, 2.305828857421875, 2.306450439453125, 2.3074384765625, 2.30639404296875, 2.306155517578125, 2.30721240234375, 2.3061943359375, 2.306260986328125, 2.3063408203125, 2.305946533203125, 2.30626708984375, 2.3059384765625, 2.306785400390625, 2.30590576171875, 2.307837890625, 2.308127685546875, 2.307727294921875, 2.30624755859375, 2.30803759765625, 2.308209716796875, 2.306083740234375, 2.308106201171875, 2.30796484375, 2.307409912109375, 2.307322998046875, 2.3070556640625, 2.30601416015625, 2.30601220703125, 2.30685400390625, 2.3067841796875, 2.3062333984375, 2.30788818359375, 2.3078798828125, 2.307981201171875, 2.308041748046875, 2.307943359375, 2.3078779296875, 2.306785400390625, 2.306217041015625, 2.30694189453125, 2.3059833984375, 2.30669921875, 2.30706787109375, 2.307828857421875, 2.3062119140625, 2.30769970703125, 2.30689990234375, 2.306872314453125, 2.30632763671875, 2.307541015625, 2.307304443359375, 2.306280517578125, 2.305987548828125, 2.3064677734375, 2.30799365234375, 2.30702490234375, 4.74871826171875, 2.306419677734375, 2.30594775390625, 2.305490966796875, 2.3053935546875, 2.306144287109375, 2.305890380859375, 2.30618310546875, 2.30565380859375, 2.305838134765625, 2.3064453125, 2.306163818359375, 2.305967041015625, 2.30632958984375, 2.30599365234375, 2.30660205078125, 2.306093017578125, 2.30550927734375, 2.3062958984375, 2.30859375, 2.307629150390625, 2.305786865234375, 2.30675244140625, 2.30698291015625, 2.307533935546875, 2.306155517578125, 2.30588916015625, 2.30687744140625, 2.30660205078125, 2.30624658203125, 2.30635205078125, 2.30655078125, 2.30631005859375, 2.307313720703125, 2.306417724609375, 2.306740234375, 2.306871337890625, 2.30619140625, 2.3060859375, 2.305734619140625, 2.3065068359375, 2.30648828125, 2.306944091796875, 2.30637060546875, 2.3063173828125, 2.305910888671875, 2.306567138671875, 2.305996826171875, 2.306691162109375, 2.306502685546875, 2.306821044921875, 2.306130859375, 2.30672900390625, 2.307013671875, 2.306767822265625, 2.306404296875, 2.30620458984375, 2.306543701171875, 2.30641552734375, 2.30656298828125, 2.306662353515625, 2.306578369140625, 2.30736083984375, 4.75277099609375, 2.306112548828125, 2.305987548828125, 2.306069580078125, 2.305553466796875, 2.305196044921875, 2.306450439453125, 2.306493408203125, 2.306361328125, 2.305670166015625, 2.305954833984375, 2.30636328125, 2.305942626953125, 2.305912841796875, 2.3060244140625, 2.306492431640625, 2.306491455078125, 2.3065087890625, 2.306021484375, 2.30662255859375, 2.306044921875, 2.305946533203125, 2.306109375, 2.3060673828125, 2.30651708984375, 2.306368408203125, 2.306166748046875, 2.305617919921875, 2.3070966796875, 2.30624462890625, 2.30612060546875, 2.30616259765625, 2.306585693359375, 2.3061943359375, 2.305982421875, 2.30618212890625, 2.30691015625, 2.307801025390625, 2.30774169921875, 2.305775634765625, 2.30598974609375, 2.307757080078125, 2.307713134765625, 2.3067412109375, 2.306273193359375, 2.306452392578125, 2.30667578125, 2.306123779296875, 2.307871826171875, 2.3065498046875, 2.306324462890625, 
2.3063818359375, 2.306296875, 2.306356201171875, 2.306536376953125, 2.30616162109375, 2.306343994140625, 2.3068017578125, 2.307167236328125, 2.3066982421875, 2.3068271484375, 2.30626611328125, 2.306765869140625, 4.75146533203125, 2.305406005859375, 2.306103271484375, 2.306335693359375, 2.306105224609375, 2.305650634765625, 2.305935302734375, 2.3065302734375, 2.306123779296875, 2.306839599609375, 2.306525146484375, 2.3065908203125, 2.3057275390625, 2.3065927734375, 2.306326416015625, 2.305699951171875, 2.306766845703125, 2.306298828125, 2.306177978515625, 2.306017333984375, 2.3064453125, 2.307205078125, 2.3069912109375, 2.30652099609375, 2.307852294921875, 2.30791357421875, 2.307249267578125, 2.30723388671875, 2.308533203125, 2.307859375, 2.307365966796875, 2.30736474609375, 2.306230224609375, 2.306595947265625, 2.306797607421875, 2.306817138671875, 2.307945556640625, 2.306934814453125, 2.306532470703125, 2.306154541015625, 2.306238525390625, 2.306093994140625, 2.306994140625, 2.30653125, 2.30648828125, 2.30630810546875, 2.306410400390625, 2.30582177734375, 2.30702392578125, 2.306610107421875, 2.30685888671875, 2.30723388671875, 2.307060791015625, 2.30736083984375, 2.307283935546875, 2.306974609375, 2.307143798828125, 2.30702685546875, 2.307147705078125, 2.306912353515625, 2.306466796875, 2.30637060546875, 2.305808349609375, 4.75029296875, 2.3062138671875, 2.305847412109375, 2.305751953125, 2.306163818359375, 2.305503173828125, 2.306188232421875, 2.305998779296875, 2.306451416015625, 2.305594482421875, 2.30626513671875, 2.30557080078125, 2.306021484375, 2.305986572265625, 2.30660498046875, 2.306206787109375, 2.305995849609375, 2.306838623046875, 2.306428955078125, 2.306533447265625, 2.306038818359375, 2.306441162109375, 2.306575439453125, 2.30774267578125, 2.30721435546875, 2.307527587890625, 2.30657421875, 2.3058974609375, 2.30637255859375, 2.305725341796875, 2.30624560546875, 2.306049072265625, 2.3060224609375, 2.306079833984375, 2.306041748046875, 2.305879150390625, 2.30664404296875, 2.306240478515625, 2.30664697265625, 2.306007080078125, 2.306346923828125, 2.3059384765625, 2.305849365234375, 2.306935791015625, 2.306491455078125, 2.306400146484375, 2.3062744140625, 2.306458740234375, 2.306747314453125, 2.307177490234375, 2.306862060546875, 2.306808837890625, 2.306905029296875, 2.30681298828125, 2.306693115234375, 2.30618017578125, 2.30659375, 2.306906005859375, 2.30719482421875, 2.306628662109375, 2.306923583984375, 2.306654296875, 2.3069306640625]",tokens/s,0.4270648326252095,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-564M,facebook/xglm-564M,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File 
""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: XGLMForCausalLM does not support Flash Attention 2.0 yet. Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpxdn2idv5/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,TencentARC/Mistral_Pro_8B_v0.1,TencentARC/Mistral_Pro_8B_v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2454.69184,7298.613248,0.0,6652.166144,6323.221504,s,10,7.735312255859376,0.7735312255859375,0.0028429451285762273,0.7729794311523437,0.7762465698242188,0.7783619323730468,0.7800542224121094,"[0.780477294921875, 0.7757764892578125, 0.7711934204101563, 0.770524658203125, 0.7719502563476562, 0.7706873168945313, 0.7725225219726563, 0.7734363403320312, 0.7744036254882812, 0.77434033203125]",tokens/s,330.9497943099635,kWh,9.102796527565035e-06,4.9879214771863185e-06,4.3937086431680494e-05,5.802780443643185e-05,tokens/kWh,4411678.202997362,MB,2454.69184,7298.613248,0.0,6652.166144,6382.564864,s,10,458.5464296875,45.85464296875,0.0073453057278341505,45.852935546875,45.866698828124996,45.8672068359375,45.8676132421875,"[45.8568515625, 45.857828125, 45.85469140625, 45.8511796875, 45.845234375, 45.8491953125, 45.85046484375, 45.84668359375, 45.8665859375, 
45.86771484375]",tokens/s,1.3739066738112995,kWh,0.0005414656486152074,0.00029677022395511814,0.002569976393586324,0.003408212266156649,tokens/kWh,18484.764175514054,,s,629,464.77344433593714,0.7389084965595191,0.09178579233429768,0.727773193359375,0.7285446655273438,0.7288094848632812,1.4987512353515624,"[0.72828515625, 0.7276810302734374, 0.7278029174804688, 0.72793701171875, 0.7272877807617187, 0.727383056640625, 0.7272335205078125, 0.7274444580078125, 0.7278551025390625, 0.727720947265625, 0.7276083374023438, 0.7281663818359375, 0.7275867919921875, 0.72804248046875, 0.72800048828125, 0.7276687622070312, 0.7281151733398438, 0.7279267578125, 0.7276728515625, 0.7276615600585937, 0.7278981323242187, 0.7287255249023438, 0.7286405029296875, 0.7275847778320312, 0.7281510620117188, 0.7281069946289063, 0.7279093627929687, 0.7283015747070313, 0.7283804321289062, 0.7276973876953124, 0.7277127685546875, 0.7276400756835938, 0.7282882690429687, 0.7277936401367188, 0.7284705200195313, 0.727784423828125, 0.727952392578125, 0.727930908203125, 0.728369140625, 0.728827880859375, 0.7278551025390625, 0.72924365234375, 0.7275161743164062, 0.7274977416992188, 0.7274014892578125, 0.727947265625, 0.7273707275390625, 0.7273738403320312, 0.7270891723632813, 0.72770458984375, 0.727604248046875, 0.7274721069335938, 0.728226806640625, 0.7281571655273438, 0.7279820556640625, 0.7282749633789063, 0.7275151977539063, 0.7279564208984375, 0.7273912353515625, 0.7277537231445312, 0.7282349853515625, 0.7278714599609375, 1.503824951171875, 0.72740966796875, 0.7277650146484375, 0.7272868041992188, 0.7274219360351563, 0.7278981323242187, 0.7287039794921875, 0.7276185302734375, 0.7274321899414062, 0.7271383056640625, 0.72722021484375, 0.7271024780273437, 0.7274373168945313, 0.7272499389648438, 0.7280332641601562, 0.7276113891601562, 0.7276943359375, 0.7272652587890625, 0.7283251342773438, 0.7285104370117188, 0.7281961059570312, 0.7280137939453125, 0.7283834838867187, 0.7276124267578125, 0.7279022216796875, 0.7284910278320312, 0.7285155639648437, 0.728142822265625, 0.7280394287109375, 0.7274547119140625, 0.7279277954101563, 0.7275509643554687, 0.7277987670898437, 0.7278591918945313, 0.728226806640625, 0.7301795654296875, 0.726887451171875, 0.727372802734375, 0.7274393310546875, 0.7276984252929688, 0.72789404296875, 0.7279503173828125, 0.7277659912109375, 0.7282677612304688, 0.7285678100585937, 0.7279226684570312, 0.7286671142578125, 0.7283712158203125, 0.7282360229492187, 0.727741455078125, 0.7277588500976563, 0.7276482543945313, 0.727857177734375, 0.7283681030273438, 0.7278796997070313, 0.7283937377929688, 0.727920654296875, 0.7278295288085938, 0.728015869140625, 0.7276452026367187, 0.7281694946289062, 0.7286343383789062, 0.7274926147460937, 1.4987724609375, 0.7273533325195313, 0.727457763671875, 0.7278960571289063, 0.7272560424804687, 0.7277005004882813, 0.7277772827148438, 0.7281305541992188, 0.7275653076171875, 0.727499755859375, 0.7276851196289063, 0.7280271606445312, 0.7270768432617187, 0.7270553588867188, 0.7272847290039063, 0.7274874877929688, 0.7273748779296875, 0.727520263671875, 0.727920654296875, 0.7277373657226562, 0.7273338623046876, 0.728322021484375, 0.7278253784179688, 0.7282452392578125, 0.72855859375, 0.7283507080078125, 0.7274547119140625, 0.727931884765625, 0.7276277465820312, 0.7273011474609375, 0.7271905517578126, 0.727736328125, 0.7286302490234375, 0.728036376953125, 0.727710693359375, 0.7286405029296875, 0.728036376953125, 0.7283230590820312, 0.72854833984375, 0.7284182739257813, 0.7284367065429688, 
0.729618408203125, 0.7277322387695313, 0.7279042358398438, 0.7280332641601562, 0.7283035888671875, 0.7280281372070313, 0.7276656494140625, 0.7277639770507812, 0.7281858520507812, 0.7287337036132813, 0.7282145385742187, 0.7282606201171875, 0.7274495849609375, 0.727203857421875, 0.7272243041992188, 0.727235595703125, 0.7272662963867188, 0.7274495849609375, 0.727531494140625, 0.7289282836914063, 0.7277404174804688, 0.7281520385742187, 1.498346435546875, 0.7277035522460937, 0.7284224243164062, 0.7275243530273438, 0.727741455078125, 0.727731201171875, 0.7279708251953125, 0.7275581665039063, 0.7280773315429687, 0.7281356811523437, 0.7283568725585937, 0.7276912841796875, 0.7285411987304687, 0.7277701416015625, 0.7284715576171875, 0.72749462890625, 0.7280516967773437, 0.7273963623046875, 0.7273779296875, 0.7273543701171875, 0.7274270629882813, 0.7274116821289063, 0.727689208984375, 0.7282974853515625, 0.7277066040039063, 0.7288258666992188, 0.72807421875, 0.727720947265625, 0.728501220703125, 0.7287817993164063, 0.728057861328125, 0.7277598876953125, 0.72751513671875, 0.7276728515625, 0.7286773681640625, 0.7274321899414062, 0.7279042358398438, 0.727920654296875, 0.727573486328125, 0.7276513061523437, 0.7273072509765625, 0.7275028686523437, 0.7276226806640625, 0.727731201171875, 0.7275745239257813, 0.727709716796875, 0.7274833984375, 0.7272919311523437, 0.7273953247070313, 0.7276328735351563, 0.7272529907226563, 0.7279380493164063, 0.7277659912109375, 0.7278837890625, 0.7274598388671875, 0.7278622436523438, 0.727636962890625, 0.7282718505859375, 0.7279830932617187, 0.7275888671875, 0.72747314453125, 0.7274137573242188, 0.7276810302734374, 1.500190673828125, 0.7274137573242188, 0.727794677734375, 0.72776806640625, 0.7275325317382813, 0.728131591796875, 0.7274035034179688, 0.7273656616210937, 0.727741455078125, 0.7269683227539062, 0.7274475708007813, 0.727498779296875, 0.7274158325195312, 0.7276226806640625, 0.7277803344726562, 0.7272796020507812, 0.7277967529296875, 0.7278120727539062, 0.7281182861328125, 0.7282227172851562, 0.729248779296875, 0.7279073486328125, 0.727394287109375, 0.7274772338867187, 0.7273768920898438, 0.7279462280273438, 0.7280506591796875, 0.7277485961914063, 0.7276800537109375, 0.7273492431640625, 0.7277352905273438, 0.7280148315429688, 0.7277557983398437, 0.7279503173828125, 0.72796875, 0.727677978515625, 0.7273717651367188, 0.7274014892578125, 0.7277393798828125, 0.7273564453125, 0.7273922729492187, 0.7276431274414062, 0.7273267211914063, 0.727183349609375, 0.7276964111328125, 0.7270574340820313, 0.7279892578125, 0.7274506225585937, 0.7283281860351563, 0.7276236572265625, 0.7274864501953126, 0.7277271118164063, 0.7274741821289062, 0.7276431274414062, 0.727984130859375, 0.7277352905273438, 0.727568359375, 0.7290419311523437, 0.7279063110351562, 0.7274669799804687, 0.7279380493164063, 0.7273656616210937, 0.727736328125, 1.4986966552734375, 0.7277783203125, 0.7279697875976563, 0.7273380126953125, 0.7279759521484375, 0.7274772338867187, 0.7276113891601562, 0.72751513671875, 0.727520263671875, 0.7276339111328125, 0.7279185791015625, 0.7278212890625, 0.7281500244140625, 0.7277086791992188, 0.7276032104492187, 0.7277926635742188, 0.7276728515625, 0.7275827026367188, 0.7275140991210938, 0.7280179443359375, 0.7281879272460937, 0.7278212890625, 0.727699462890625, 0.72736767578125, 0.7276728515625, 0.7279083251953125, 0.7273850708007813, 0.7275069580078125, 0.7274024658203125, 0.7273922729492187, 0.7274700927734375, 0.7275847778320312, 0.7282288818359375, 0.7278090209960938, 
0.7279667358398437, 0.7283240966796874, 0.7274004516601562, 0.7275899047851563, 0.729038818359375, 0.728173583984375, 0.727530517578125, 0.7272581176757813, 0.727572509765625, 0.7277127685546875, 0.72736767578125, 0.7276503295898438, 0.7283455810546875, 0.72766259765625, 0.7285360717773437, 0.7277281494140625, 0.72745166015625, 0.7276943359375, 0.7278653564453125, 0.7278253784179688, 0.7273615112304688, 0.7273717651367188, 0.7274547119140625, 0.7275899047851563, 0.7274557495117188, 0.7279779663085938, 0.7282565307617187, 0.728158203125, 0.7288606567382813, 1.500291015625, 0.7285186767578125, 0.7282554931640625, 0.727962646484375, 0.72707275390625, 0.7270717163085938, 0.7278837890625, 0.7273738403320312, 0.7277557983398437, 0.7276339111328125, 0.728394775390625, 0.7282606201171875, 0.728369140625, 0.727878662109375, 0.7280169067382812, 0.727794677734375, 0.7282175903320313, 0.7275038452148438, 0.7273502807617187, 0.7271946411132812, 0.7275499267578125, 0.7277998046875, 0.7273502807617187, 0.7283394775390625, 0.7281551513671874, 0.7276728515625, 0.729017333984375, 0.7274383544921875, 0.7284019165039063, 0.7275847778320312, 0.727709716796875, 0.7274024658203125, 0.7274024658203125, 0.7272263793945313, 0.7275448608398437, 0.727203857421875, 0.7272734985351562, 0.7281172485351562, 0.72766259765625, 0.7277854614257813, 0.7278192749023438, 0.7277168579101563, 0.7283128051757812, 0.7274014892578125, 0.7279892578125, 0.7273103637695313, 0.7277905883789062, 0.72785302734375, 0.7278776245117188, 0.727984130859375, 0.7275591430664062, 0.7280732421875, 0.728015869140625, 0.7281817626953125, 0.7277322387695313, 0.72749365234375, 0.7281449584960937, 0.7275447387695313, 0.7287470092773437, 0.727857177734375, 0.727446533203125, 0.7272703857421875, 0.7274813232421875, 1.5015628662109375, 0.7280670776367187, 0.7274475708007813, 0.7279002075195312, 0.7276564331054688, 0.727930908203125, 0.7274649658203125, 0.7278028564453125, 0.7277875366210937, 0.7275980834960938, 0.7279329223632812, 0.7276553955078126, 0.7276932983398438, 0.7277240600585938, 0.7281275024414062, 0.7278305053710937, 0.7283988647460937, 0.727973876953125, 0.7275120849609376, 0.7283138427734375, 0.7280885620117188, 0.728431640625, 0.7279882202148438, 0.7285555419921875, 0.727773193359375, 0.7274137573242188, 0.727572509765625, 0.7277168579101563, 0.7279124755859375, 0.7280169067382812, 0.7274721069335938, 0.7271577758789063, 0.7273502807617187, 0.7277578125, 0.72791552734375, 0.7280302124023438, 0.7272263793945313, 0.727066650390625, 0.7275479125976563, 0.7274383544921875, 0.727099365234375, 0.7272929077148438, 0.7277578125, 0.728215576171875, 0.727783447265625, 0.7276656494140625, 0.72743115234375, 0.7274690551757812, 0.7273277587890625, 0.7281571655273438, 0.7275397338867188, 0.727414794921875, 0.7275867919921875, 0.7271116943359375, 0.727257080078125, 0.727667724609375, 0.7276851196289063, 0.7272703857421875, 0.7279493408203125, 0.7277250366210938, 0.7286610107421875, 0.7273246459960937, 0.7279380493164063, 1.50266162109375, 0.72736767578125, 0.7276441650390625, 0.727141357421875, 0.7274188842773438, 0.7278018798828125, 0.727625732421875, 0.727541748046875, 0.7280783081054687, 0.7277342529296875, 0.72876953125, 0.728369140625, 0.7288411865234375, 0.7279749145507812, 0.7283046264648437, 0.72797900390625, 0.7282718505859375, 0.7281930541992188, 0.728658935546875, 0.7289108276367188, 0.7287183227539062, 0.7283169555664063, 0.7286814575195313, 0.7278428344726563, 0.7285463256835938, 0.72789404296875, 0.72875830078125, 0.72804248046875, 
0.7291146240234375, 0.7289763793945313, 0.7286661376953125, 0.728784912109375, 0.728363037109375, 0.7279595336914062, 0.7274700927734375, 0.7275233154296875, 0.7272816772460937, 0.7279974365234375, 0.7284090576171875, 0.72762060546875, 0.728300537109375, 0.727457763671875, 0.7274711303710938, 0.727183349609375, 0.7278960571289063, 0.7272171630859375, 0.7289682006835938, 0.7285933837890625, 0.7276564331054688, 0.7285442504882812, 0.7269273681640624, 0.72795751953125, 0.7275642700195313, 0.7277936401367188, 0.7290480346679687, 0.72764208984375, 0.727804931640625, 0.7280322265625, 0.7278858032226563, 0.727383056640625, 0.7281940307617187, 0.7273421020507812, 0.72789404296875, 1.502066650390625, 0.7280179443359375, 0.7290101928710937, 0.7292303466796874, 0.7280660400390625, 0.728395751953125, 0.72835791015625, 0.72821142578125, 0.7285330200195312, 0.7277250366210938, 0.728616943359375, 0.7284172973632812, 0.7285718994140625, 0.728131591796875, 0.7278837890625, 0.7286558837890625, 0.7279124755859375, 0.7291678466796875, 0.728326171875, 0.7273584594726562, 0.7271588134765625, 0.7275980834960938, 0.7273738403320312, 0.7279912719726562, 0.72772607421875, 0.7297515258789062, 0.7277824096679687, 0.7272632446289062, 0.7278909301757812, 0.7273318481445312, 0.7274424438476562, 0.7272509155273438, 0.7275796508789063, 0.7279483032226562, 0.7278059692382812, 0.7278919677734375, 0.7276881713867187, 0.7284940795898438, 0.7285985107421875, 0.7283128051757812, 0.7282175903320313, 0.7282974853515625, 0.728426513671875, 0.7279165649414062, 0.7284940795898438, 0.7287070922851563, 0.7288955078125, 0.7288780517578125, 0.7277035522460937, 0.72800048828125, 0.72765234375, 0.728056884765625, 0.7274495849609375, 0.7280240478515625, 0.727583740234375, 0.727520263671875, 0.7282698364257812, 0.7273543701171875, 0.7287091064453125, 0.7278960571289063, 0.7277035522460937, 0.7276973876953124, 0.727141357421875]",tokens/s,1.3533475452727455,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-base-alpha-7b,stabilityai/stablelm-base-alpha-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.1,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1477.30432,8196.194304,0.0,7549.7472,6941.631488,s,10,6.0462579345703125,0.6046257934570314,0.0007435687799022576,0.604391815185547,0.6052782531738281,0.6058451202392579,0.6062986138916016,"[0.6064119873046875, 0.6051522827148438, 0.6040460205078125, 0.6039584350585937, 0.6041729736328125, 0.603818603515625, 0.6042236328125, 0.6045599975585938, 0.6048197021484375, 0.6050942993164062]",tokens/s,423.4023800676526,kWh,7.135871880584294e-06,3.909028176627437e-06,3.5656695191997026e-05,4.670159524920876e-05,tokens/kWh,5481611.466030966,MB,1477.30432,8196.194304,0.0,7549.7472,7094.078464,s,10,357.59960156249997,35.759960156249996,0.0025836315483325416,35.759828125,35.7629109375,35.7636703125,35.7642778125,"[35.7644296875, 35.76244140625, 35.755375, 35.758703125, 35.75703515625, 35.75961328125, 35.7587890625, 35.76004296875, 35.7604296875, 
35.7627421875]",tokens/s,1.7617469293793129,kWh,0.0004221709076563517,0.00023138749488215953,0.002133793484811199,0.0027873518873497106,tokens/kWh,22602.097814030254,,s,629,362.47207647705085,0.5762672122051682,0.07178708636540315,0.567605224609375,0.5678377075195312,0.5679312866210938,1.1716501220703124,"[0.5675867919921875, 0.5674536743164063, 0.5675294799804688, 0.5675069580078125, 0.5674711303710938, 0.56766259765625, 0.5675397338867187, 0.567414794921875, 0.5675397338867187, 0.567857177734375, 0.5676984252929688, 0.5676615600585937, 0.5676932983398437, 0.567741455078125, 0.567667724609375, 0.5677650146484375, 0.5679800415039062, 0.5678551025390625, 0.5676390380859375, 0.5676298217773438, 0.567710693359375, 0.567710693359375, 0.567568359375, 0.567900146484375, 0.5677824096679688, 0.567510009765625, 0.56774755859375, 0.5680036010742188, 0.56772607421875, 0.5676021728515624, 0.5675284423828125, 0.5676349487304687, 0.5676973876953125, 0.567857177734375, 0.5677864990234375, 0.5676759033203125, 0.5677332763671875, 0.567736328125, 0.5676236572265625, 0.5676482543945313, 0.5678510131835938, 0.5677496337890625, 0.5674977416992187, 0.5674495849609374, 0.5680199584960938, 0.5677691040039062, 0.5677250366210937, 0.5677158203125, 0.5677281494140625, 0.5677322387695313, 0.5676615600585937, 0.5676973876953125, 0.5678653564453126, 0.5677117309570312, 0.5676216430664063, 0.56771484375, 0.5676461791992188, 0.5675673828125, 0.5677025146484375, 0.5677178955078125, 0.5677588500976563, 0.5677485961914063, 1.1728394775390625, 0.5675120849609375, 0.5672499389648438, 0.5673809814453125, 0.567425048828125, 0.5678643188476562, 0.5675172119140625, 0.5675909423828125, 0.5675530395507813, 0.5675233154296875, 0.5674926147460938, 0.567546875, 0.5676093139648437, 0.567673828125, 0.5677547607421874, 0.56760009765625, 0.56751513671875, 0.5678336181640625, 0.5675867919921875, 0.567462890625, 0.5675970458984375, 0.5676533813476563, 0.56762060546875, 0.5676195678710938, 0.5676820678710938, 0.5676759033203125, 0.5675878295898438, 0.5676216430664063, 0.5675888671875, 0.5675222778320312, 0.567667724609375, 0.5676400756835938, 0.5675427856445312, 0.567562255859375, 0.5677056274414063, 0.567699462890625, 0.5677117309570312, 0.5678643188476562, 0.56776806640625, 0.5677557983398438, 0.5678028564453125, 0.5681766357421875, 0.5678325805664063, 0.5677014770507812, 0.5676093139648437, 0.5677567749023438, 0.5677496337890625, 0.5676830444335937, 0.5678008422851563, 0.5677178955078125, 0.5676165161132812, 0.5676011352539062, 0.5678192749023437, 0.5676881713867188, 0.5675899047851563, 0.5676656494140625, 0.5677383422851563, 0.5676973876953125, 0.5679749145507812, 0.5676431274414062, 0.567762939453125, 0.5677230224609375, 0.567647216796875, 1.1716873779296875, 0.5677588500976563, 0.5675479125976562, 0.567457763671875, 0.5674711303710938, 0.5678981323242187, 0.5679073486328124, 0.5674229736328125, 0.5675980834960938, 0.5674864501953125, 0.56749462890625, 0.5673656616210937, 0.5676482543945313, 0.5675530395507813, 0.5675089721679687, 0.5674639282226562, 0.5674158325195312, 0.5674301147460937, 0.5674035034179687, 0.5674526977539063, 0.567446533203125, 0.5673840942382813, 0.567498779296875, 0.56760009765625, 0.5675858154296874, 0.5674158325195312, 0.56747314453125, 0.567436279296875, 0.5673809814453125, 0.567546875, 0.5677117309570312, 0.5675601806640626, 0.5675089721679687, 0.5675140991210937, 0.5675765991210937, 0.5675919189453125, 0.5674424438476563, 0.5677158203125, 0.5675878295898438, 0.56744140625, 0.5677056274414063, 
0.5677404174804688, 0.5675233154296875, 0.5674649658203125, 0.567341064453125, 0.5675530395507813, 0.5674721069335937, 0.5675714721679688, 0.5675264282226562, 0.5675673828125, 0.5675612182617188, 0.5674956665039063, 0.5675284423828125, 0.5675427856445312, 0.5675919189453125, 0.567605224609375, 0.5675079956054687, 0.5675581665039062, 0.5677537231445312, 0.5677322387695313, 0.5675714721679688, 0.5675407104492187, 0.5676267700195312, 1.1715543212890625, 0.56745166015625, 0.5672324829101563, 0.5672796020507812, 0.5673430786132813, 0.5674086303710938, 0.5673318481445313, 0.56740966796875, 0.5675089721679687, 0.5676113891601563, 0.5675950317382813, 0.5674301147460937, 0.5677127685546876, 0.5675438232421876, 0.5674536743164063, 0.5673994140625, 0.5674403686523437, 0.5674127197265625, 0.5678090209960938, 0.5673983764648437, 0.5675172119140625, 0.5676820678710938, 0.5676349487304687, 0.5673932495117188, 0.567530517578125, 0.5676093139648437, 0.5676400756835938, 0.5676165161132812, 0.56760009765625, 0.5676707763671875, 0.5676339111328125, 0.567583740234375, 0.567468017578125, 0.5675867919921875, 0.5676011352539062, 0.5675888671875, 0.5676216430664063, 0.5679401245117187, 0.5675479125976562, 0.5676113891601563, 0.56771484375, 0.5675612182617188, 0.567562255859375, 0.5677076416015625, 0.5675817260742188, 0.5677230224609375, 0.5677875366210937, 0.56810595703125, 0.5677905883789063, 0.5677076416015625, 0.56764111328125, 0.5676318969726563, 0.5676728515625, 0.5677659912109375, 0.567630859375, 0.5676646118164063, 0.5677199096679687, 0.567635986328125, 0.5677271118164062, 0.567751708984375, 0.5676656494140625, 0.5677987670898438, 0.5677404174804688, 1.171937255859375, 0.5675632934570313, 0.567510009765625, 0.5676011352539062, 0.5675110473632813, 0.5675509643554687, 0.5675704345703125, 0.5676400756835938, 0.5673451538085937, 0.5674536743164063, 0.5673789672851562, 0.5673011474609375, 0.5673953247070312, 0.567414794921875, 0.5674506225585938, 0.5674485473632812, 0.5674711303710938, 0.5674024658203125, 0.5676707763671875, 0.5677967529296875, 0.5677352905273437, 0.5674874877929688, 0.5676021728515624, 0.5673779296875, 0.5679329223632813, 0.5678540649414062, 0.5675560913085937, 0.5674352416992188, 0.5675601806640626, 0.5673789672851562, 0.5673871459960937, 0.5673840942382813, 0.5674967041015625, 0.5679431762695313, 0.5675222778320312, 0.5674014892578125, 0.5675642700195312, 0.5678223266601562, 0.5675274047851563, 0.5674485473632812, 0.5675612182617188, 0.5675130615234375, 0.5677005004882812, 0.5681069946289062, 0.5675448608398438, 0.56759912109375, 0.5675233154296875, 0.5674772338867188, 0.5674833984375, 0.5675765991210937, 0.5675601806640626, 0.5674649658203125, 0.567468017578125, 0.5676585083007812, 0.5677854614257812, 0.5675899047851563, 0.567546875, 0.5675213012695313, 0.5679288330078125, 0.567762939453125, 0.5678182373046875, 0.5676216430664063, 0.5676011352539062, 1.17129931640625, 0.5673502807617188, 0.567215087890625, 0.5675376586914063, 0.56736767578125, 0.5673421020507813, 0.5673707275390625, 0.567426025390625, 0.5676564331054688, 0.5675714721679688, 0.5674864501953125, 0.5673482055664063, 0.5674526977539063, 0.5674854125976563, 0.5675048828125, 0.5674536743164063, 0.5673359375, 0.56749462890625, 0.5675018310546875, 0.5674526977539063, 0.5674557495117187, 0.5676062622070313, 0.5678377075195312, 0.5674813232421875, 0.5677189331054687, 0.5678090209960938, 0.5677772827148437, 0.5675213012695313, 0.567751708984375, 0.5677783203125, 0.5676656494140625, 0.5676830444335937, 0.5675755615234375, 
0.5678561401367187, 0.567636962890625, 0.5677291259765626, 0.5675059204101562, 0.567667724609375, 0.56776806640625, 0.5675161743164062, 0.5675540771484375, 0.5675079956054687, 0.5677742309570313, 0.5676452026367188, 0.56749462890625, 0.5674864501953125, 0.567593994140625, 0.56757861328125, 0.5675172119140625, 0.5676615600585937, 0.56762060546875, 0.5679359741210938, 0.5679605712890625, 0.5678868408203125, 0.5678858032226562, 0.567710693359375, 0.5680302124023437, 0.5678018798828125, 0.5676083374023437, 0.5676615600585937, 0.5680117797851563, 0.5678970947265625, 0.56764111328125, 1.1718861083984375, 0.5675161743164062, 0.5677250366210937, 0.5673861694335938, 0.567342041015625, 0.5673369750976562, 0.5673584594726563, 0.5674086303710938, 0.5673850708007813, 0.5677219848632813, 0.56757861328125, 0.56755712890625, 0.5675601806640626, 0.5676646118164063, 0.5676544189453125, 0.5674854125976563, 0.5673922729492188, 0.567499755859375, 0.5677189331054687, 0.5675161743164062, 0.5676431274414062, 0.56751513671875, 0.567520263671875, 0.5674598388671875, 0.5675693969726563, 0.5675233154296875, 0.5675233154296875, 0.5677301635742188, 0.5675919189453125, 0.5674035034179687, 0.5674905395507812, 0.5677189331054687, 0.5676380004882813, 0.5675653076171875, 0.567541748046875, 0.5674424438476563, 0.5674383544921875, 0.5678059692382813, 0.567689208984375, 0.56762060546875, 0.5675601806640626, 0.5675079956054687, 0.5674854125976563, 0.5674926147460938, 0.56753564453125, 0.5678244018554688, 0.56768408203125, 0.5676267700195312, 0.5678377075195312, 0.5678120727539062, 0.567804931640625, 0.5675601806640626, 0.5677752075195313, 0.5677711181640624, 0.5676748657226562, 0.5678848266601563, 0.5678233642578125, 0.5676984252929688, 0.5676134643554688, 0.56768408203125, 0.5676605224609375, 0.56764111328125, 0.5679544067382812, 1.172231201171875, 0.5673134155273437, 0.5675560913085937, 0.5677117309570312, 0.567499755859375, 0.5674158325195312, 0.5673758544921875, 0.56732568359375, 0.5674649658203125, 0.5678387451171875, 0.5675479125976562, 0.5674649658203125, 0.5675950317382813, 0.5675284423828125, 0.5674014892578125, 0.5673430786132813, 0.5677230224609375, 0.5675120849609375, 0.5674485473632812, 0.5674014892578125, 0.5676533813476563, 0.5676400756835938, 0.567568359375, 0.5674219360351562, 0.5676533813476563, 0.567604248046875, 0.5677455444335937, 0.5675950317382813, 0.5676881713867188, 0.567636962890625, 0.5676769409179687, 0.5676810302734375, 0.56772607421875, 0.567942138671875, 0.5678171997070313, 0.5677557983398438, 0.5677598876953125, 0.5678653564453126, 0.568068115234375, 0.5676103515625, 0.5676461791992188, 0.5675264282226562, 0.5675867919921875, 0.56753564453125, 0.5677793579101562, 0.567762939453125, 0.567562255859375, 0.5676615600585937, 0.5673656616210937, 0.5675407104492187, 0.5676349487304687, 0.567783447265625, 0.5676871948242187, 0.567531494140625, 0.5677056274414063, 0.5678079833984375, 0.5679267578125, 0.5676380004882813, 0.5675755615234375, 0.567530517578125, 0.5676113891601563, 0.5678325805664063, 0.5676062622070313, 1.172675537109375, 0.567414794921875, 0.56749462890625, 0.5675540771484375, 0.5675540771484375, 0.5675079956054687, 0.5674649658203125, 0.5674332275390624, 0.5676083374023437, 0.5674967041015625, 0.5675346069335937, 0.567309326171875, 0.5674373168945313, 0.5676553955078125, 0.5676759033203125, 0.5677711181640624, 0.5675499267578125, 0.5675806884765625, 0.5676011352539062, 0.5674813232421875, 0.5675172119140625, 0.5674291381835938, 0.567647216796875, 0.5675264282226562, 0.5675950317382813, 
0.5675899047851563, 0.5676093139648437, 0.5676820678710938, 0.5677025146484375, 0.567605224609375, 0.5676226806640625, 0.5677219848632813, 0.5676124267578125, 0.5677138061523438, 0.5676380004882813, 0.5676656494140625, 0.5676482543945313, 0.56774658203125, 0.5677158203125, 0.5675950317382813, 0.5677445068359375, 0.5677701416015625, 0.5675632934570313, 0.567531494140625, 0.56770458984375, 0.5677557983398438, 0.5675817260742188, 0.5675407104492187, 0.5675028686523438, 0.567583740234375, 0.5677485961914063, 0.5675714721679688, 0.56776806640625, 0.5676380004882813, 0.5676871948242187, 0.5676564331054688, 0.5675089721679687, 0.5678407592773438, 0.567920654296875, 0.56778955078125, 0.5677639770507813, 0.5679882202148437, 0.5677393798828125, 1.17273193359375, 0.56749365234375, 0.5674434814453125, 0.5675346069335937, 0.5677537231445312, 0.5674434814453125, 0.5675059204101562, 0.5675284423828125, 0.567583740234375, 0.5678008422851563, 0.567736328125, 0.5674874877929688, 0.5675048828125, 0.567436279296875, 0.5674956665039063, 0.5676564331054688, 0.567647216796875, 0.567720947265625, 0.56751513671875, 0.5676380004882813, 0.5676185302734374, 0.5676134643554688, 0.5674403686523437, 0.5674761962890625, 0.5675120849609375, 0.5675899047851563, 0.5674024658203125, 0.5674649658203125, 0.5675560913085937, 0.5677567749023438, 0.5676615600585937, 0.5677230224609375, 0.56781005859375, 0.567562255859375, 0.5678551025390625, 0.5675980834960938, 0.5675233154296875, 0.5675581665039062, 0.5675059204101562, 0.5674721069335937, 0.5674967041015625, 0.567530517578125, 0.5675007934570313, 0.5675028686523438, 0.5679646606445312, 0.567546875, 0.5677957153320312, 0.567920654296875, 0.567794677734375, 0.5679216918945312, 0.5680087280273437, 0.5677936401367187, 0.56793701171875, 0.5678694458007812, 0.5679564819335937, 0.56789404296875, 0.5677813720703125, 0.5679912719726562, 0.5678223266601562, 0.5678090209960938, 0.5678837890625, 0.5678981323242187, 0.5679011840820313]",tokens/s,1.7353060851290814,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-6B,,cuda,0,42,,,,,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,main,False,False,MB,1608.957952,5448.925184,0.0,4802.47808,4489.12128,s,10,5.066485656738281,0.5066485656738281,0.0011948895817140703,0.5068220977783202,0.5079840667724609,0.5080279983520508,0.5080631436157227,"[0.5075723266601563, 0.5061219482421875, 0.504515380859375, 0.5053372497558594, 0.5061441345214843, 0.5055963745117188, 0.5075000610351562, 0.5076519470214844, 0.5080719299316406, 0.5079743041992187]",tokens/s,505.2812093912223,kWh,5.964234835571714e-06,3.2679267622643234e-06,2.7223952334698433e-05,3.6456113932534466e-05,tokens/kWh,7022141.758547072,MB,1610.637312,5448.925184,0.0,4802.47808,4557.793792,s,10,299.1010234375,29.910102343749998,0.01023537902845777,29.913578125,29.921135546875,29.9219037109375,29.9225182421875,"[29.899591796875, 29.901322265625, 29.913013671875, 29.890712890625, 29.920396484375, 29.922671875, 29.92096484375, 
29.9152421875, 29.90296484375, 29.914142578125]",tokens/s,2.106311749654192,kWh,0.00035296389131082433,0.00019345485010443554,0.0015722178549955057,0.0021186365964107655,tokens/kWh,29736.104864198915,,s,629,303.18425177001956,0.4820099392210167,0.06023141623810366,0.4746004333496094,0.4757639221191406,0.4761423828125,0.98077423828125,"[0.474365966796875, 0.47431475830078124, 0.4748533630371094, 0.47396148681640626, 0.47435162353515625, 0.47414068603515624, 0.47463320922851565, 0.47518923950195313, 0.47532952880859375, 0.4745502624511719, 0.4740556945800781, 0.47391131591796876, 0.47444989013671873, 0.47430245971679685, 0.4739420166015625, 0.4737402954101563, 0.473987060546875, 0.47486770629882813, 0.4739993591308594, 0.475114501953125, 0.47402597045898437, 0.4752353210449219, 0.47403109741210936, 0.4739563598632813, 0.4750960693359375, 0.4755916748046875, 0.4742686767578125, 0.47484210205078126, 0.4754851989746094, 0.4751022033691406, 0.4755619812011719, 0.4745973815917969, 0.47523736572265624, 0.474829833984375, 0.474850341796875, 0.47421026611328126, 0.4759234619140625, 0.47534796142578123, 0.4750888977050781, 0.4741591186523437, 0.4745441284179687, 0.473744384765625, 0.4741099548339844, 0.473807861328125, 0.47429119873046877, 0.4740597839355469, 0.4742881164550781, 0.47400347900390627, 0.4744253234863281, 0.47619073486328123, 0.4757626953125, 0.4742860717773438, 0.47489535522460935, 0.47489950561523436, 0.47460345458984377, 0.4742686767578125, 0.4739921875, 0.4739420166015625, 0.47433624267578123, 0.4748810119628906, 0.4744110107421875, 0.47424615478515625, 0.980806640625, 0.4736993408203125, 0.47429733276367186, 0.4741478271484375, 0.4741099548339844, 0.47420416259765624, 0.4742000732421875, 0.47553228759765626, 0.4755077209472656, 0.4747817077636719, 0.47485235595703124, 0.4747202453613281, 0.4752384033203125, 0.4742318115234375, 0.4740137023925781, 0.4751011962890625, 0.47453387451171875, 0.4759981994628906, 0.4747898864746094, 0.4742266845703125, 0.47406185913085935, 0.4743239440917969, 0.4744366149902344, 0.47420416259765624, 0.47499981689453125, 0.47433114624023437, 0.47446322631835935, 0.47578317260742187, 0.47489739990234375, 0.4754503784179688, 0.4746977233886719, 0.47437005615234373, 0.4743680114746094, 0.47590194702148436, 0.47595211791992187, 0.47569818115234375, 0.474777587890625, 0.474113037109375, 0.4756346740722656, 0.4741652526855469, 0.47452056884765625, 0.4742778930664063, 0.474176513671875, 0.47388363647460935, 0.473754638671875, 0.47433624267578123, 0.474029052734375, 0.47412841796875, 0.4740433654785156, 0.47558758544921875, 0.4756910400390625, 0.4747693786621094, 0.47467111206054685, 0.4742625427246094, 0.4742799377441406, 0.473807861328125, 0.474387451171875, 0.47398092651367185, 0.4740495300292969, 0.47483187866210935, 0.4742236022949219, 0.4746342468261719, 0.4739717102050781, 0.98069091796875, 0.4739993591308594, 0.4743341979980469, 0.4740321350097656, 0.4741048278808594, 0.47378021240234375, 0.4756019287109375, 0.4752998352050781, 0.47476327514648436, 0.47476531982421877, 0.47458303833007814, 0.474603515625, 0.4745164794921875, 0.47447552490234374, 0.4745482177734375, 0.47400344848632814, 0.4754913330078125, 0.47445709228515626, 0.4743076171875, 0.4742041320800781, 0.47417855834960937, 0.4741171264648438, 0.47425332641601564, 0.47636785888671873, 0.4741949462890625, 0.4747110290527344, 0.4743659973144531, 0.47549435424804687, 0.47543499755859375, 0.4749854736328125, 0.4751646728515625, 0.4754985046386719, 0.4756643981933594, 0.475114501953125, 
0.4750315551757813, 0.47576882934570314, 0.47484927368164065, 0.47597467041015623, 0.4747806701660156, 0.47479296875, 0.47538177490234373, 0.4749906005859375, 0.47488409423828126, 0.4746322021484375, 0.4749168701171875, 0.47477862548828126, 0.4759582824707031, 0.47688909912109373, 0.47617330932617186, 0.47571865844726563, 0.4744898681640625, 0.4741591186523437, 0.47446939086914064, 0.4745513000488281, 0.4750899047851562, 0.4740403137207031, 0.474461181640625, 0.4741929016113281, 0.47546676635742186, 0.47404339599609374, 0.474176513671875, 0.47427685546875, 0.47412841796875, 0.979821533203125, 0.4742574157714844, 0.4741734313964844, 0.47405874633789064, 0.47411508178710937, 0.47479910278320314, 0.476015625, 0.47458816528320313, 0.4742225952148437, 0.47418679809570313, 0.4743782043457031, 0.47417138671875, 0.4748810119628906, 0.47444174194335936, 0.4738058166503906, 0.4744478759765625, 0.47413861083984377, 0.47446221923828125, 0.47416116333007813, 0.47388262939453124, 0.47408843994140626, 0.47393484497070315, 0.4740771789550781, 0.47400857543945313, 0.47406695556640627, 0.4738518981933594, 0.4751022033691406, 0.4763402099609375, 0.47449191284179687, 0.4742512512207031, 0.4740157470703125, 0.4743352355957031, 0.47394509887695313, 0.473849853515625, 0.47430963134765625, 0.4738713684082031, 0.47423590087890627, 0.4743987121582031, 0.47423590087890627, 0.4738887634277344, 0.4744407043457031, 0.4742297668457031, 0.4740741271972656, 0.47423590087890627, 0.47649996948242185, 0.4742901611328125, 0.4741949462890625, 0.47464346313476563, 0.476073974609375, 0.47489022827148436, 0.4758292541503906, 0.47497113037109373, 0.47431890869140625, 0.4741651916503906, 0.4740843505859375, 0.47435162353515625, 0.47437310791015624, 0.4743291015625, 0.47532852172851564, 0.474313720703125, 0.474640380859375, 0.47469473266601564, 0.4747908630371094, 0.9814691772460937, 0.4747683715820312, 0.4747376708984375, 0.4743690185546875, 0.4749127807617187, 0.4760391540527344, 0.4749434814453125, 0.4753387451171875, 0.4744488830566406, 0.4743935852050781, 0.4743935852050781, 0.47374334716796873, 0.47430349731445315, 0.47411404418945313, 0.47499264526367185, 0.47437823486328123, 0.4740597839355469, 0.4755292053222656, 0.474429443359375, 0.4746004333496094, 0.47424716186523436, 0.4757176208496094, 0.474365966796875, 0.4744038391113281, 0.47409765625, 0.47461581420898435, 0.47587124633789063, 0.47467724609375, 0.47449700927734373, 0.4752076721191406, 0.47526400756835935, 0.4758815002441406, 0.4752086791992188, 0.4745318298339844, 0.4740362548828125, 0.47401980590820314, 0.474777587890625, 0.47447760009765627, 0.47450723266601563, 0.4745646057128906, 0.4748257141113281, 0.4746987609863281, 0.4746670227050781, 0.4746844177246094, 0.4750899047851562, 0.474967041015625, 0.4748011474609375, 0.47588555908203123, 0.47625112915039064, 0.4750325622558594, 0.4751790161132812, 0.47611801147460936, 0.4751523742675781, 0.4750223388671875, 0.4746065979003906, 0.4746875, 0.47461068725585936, 0.47841998291015625, 0.4753489990234375, 0.47590911865234375, 0.4750417785644531, 0.474998779296875, 0.47562240600585937, 0.9817487182617187, 0.47520870971679685, 0.4753449096679688, 0.47469158935546873, 0.4756121520996094, 0.47580160522460935, 0.47535000610351563, 0.47563983154296874, 0.47536431884765623, 0.4756459655761719, 0.4758149108886719, 0.47476327514648436, 0.47486669921875, 0.4744960021972656, 0.4750315551757813, 0.4745902099609375, 0.4752404479980469, 0.4754586181640625, 0.47481234741210937, 0.47426150512695314, 0.4745635986328125, 
0.4747796630859375, 0.4742758483886719, 0.4749291381835937, 0.4753070068359375, 0.4757319641113281, 0.47559988403320314, 0.4744366149902344, 0.4746649475097656, 0.47449087524414063, 0.47499365234375, 0.47450009155273437, 0.47397579956054686, 0.47451034545898435, 0.47416116333007813, 0.4750878601074219, 0.47417752075195313, 0.47466085815429687, 0.4771686401367187, 0.4746567687988281, 0.47440997314453126, 0.4744816589355469, 0.4743618469238281, 0.47448269653320313, 0.47479705810546874, 0.47491070556640624, 0.47615179443359373, 0.4757442626953125, 0.4753827819824219, 0.4749906005859375, 0.4746219482421875, 0.4746925964355469, 0.4743485412597656, 0.4746967163085937, 0.47494552612304686, 0.4743270263671875, 0.47532647705078124, 0.47462603759765626, 0.4751585388183594, 0.4746270751953125, 0.47493734741210936, 0.47487387084960936, 0.4749823913574219, 0.9828372192382813, 0.4750807189941406, 0.4744366149902344, 0.4755558471679687, 0.47547698974609376, 0.47533465576171874, 0.4759132080078125, 0.4759879760742188, 0.47586407470703124, 0.4761640930175781, 0.4759255065917969, 0.4748451843261719, 0.474919921875, 0.476189697265625, 0.4745257263183594, 0.4750560913085938, 0.475325439453125, 0.474986572265625, 0.4742686157226563, 0.4745994262695313, 0.4747817077636719, 0.4749609069824219, 0.4750120849609375, 0.47644467163085935, 0.47719833374023435, 0.47524453735351563, 0.4750530700683594, 0.4744356384277344, 0.47469769287109376, 0.4745891418457031, 0.47447756958007814, 0.4750274658203125, 0.47433831787109376, 0.47436083984375, 0.4751431579589844, 0.4747120666503906, 0.474977294921875, 0.47543295288085935, 0.4742215576171875, 0.47395327758789063, 0.474292236328125, 0.473849853515625, 0.4742840270996094, 0.4745062255859375, 0.47460455322265627, 0.475863037109375, 0.4749680786132813, 0.4751790161132812, 0.4745482177734375, 0.47489227294921876, 0.4745994873046875, 0.47445599365234375, 0.47442022705078124, 0.47415603637695314, 0.47442739868164063, 0.474777587890625, 0.47418060302734377, 0.4745369567871094, 0.47485952758789063, 0.4748001708984375, 0.474187744140625, 0.47446221923828125, 0.47530703735351565, 0.9833133544921875, 0.4750899047851562, 0.4764405822753906, 0.47526705932617186, 0.4746055603027344, 0.47418368530273436, 0.4742369384765625, 0.4743475341796875, 0.474524658203125, 0.474323974609375, 0.4742778930664063, 0.47433010864257813, 0.4754462585449219, 0.47468850708007815, 0.474829833984375, 0.4747304992675781, 0.47638424682617186, 0.4761231994628906, 0.47679379272460937, 0.4750878601074219, 0.4745164794921875, 0.47426150512695314, 0.4747745361328125, 0.47573504638671876, 0.4750520324707031, 0.474893310546875, 0.4743792724609375, 0.4743096923828125, 0.474462158203125, 0.47433831787109376, 0.4749609069824219, 0.4743250427246094, 0.4742419738769531, 0.47488204956054686, 0.47455438232421876, 0.4743075866699219, 0.4756408386230469, 0.47438339233398436, 0.4743741760253906, 0.47458706665039063, 0.47492404174804687, 0.47545547485351564, 0.47435980224609375, 0.47457485961914064, 0.4752353210449219, 0.47622964477539065, 0.47467416381835936, 0.47408126831054687, 0.47500799560546875, 0.47463320922851565, 0.4743915405273437, 0.4742758483886719, 0.47516363525390626, 0.47539404296875, 0.4750796813964844, 0.4738518981933594, 0.4742266845703125, 0.477939697265625, 0.474281982421875, 0.47408126831054687, 0.4742758483886719, 0.4744366149902344, 0.4743372802734375, 0.9826785278320312, 0.4751769714355469, 0.47564697265625, 0.47452569580078124, 0.4744765319824219, 0.47413861083984377, 0.47415090942382815, 
0.4741754760742187, 0.4749885559082031, 0.47412017822265623, 0.4737423400878906, 0.4744488830566406, 0.4739051513671875, 0.4738478088378906, 0.4745400390625, 0.47486770629882813, 0.47466085815429687, 0.47589376831054686, 0.47535205078125, 0.4755548095703125, 0.4743424072265625, 0.47426663208007813, 0.47488409423828126, 0.47611911010742186, 0.47500384521484373, 0.4744591369628906, 0.47539813232421874, 0.47478475952148436, 0.47490567016601565, 0.47446112060546874, 0.475104248046875, 0.4745164794921875, 0.47489535522460935, 0.47453900146484373, 0.4745594787597656, 0.47434445190429686, 0.47417752075195313, 0.47448269653320313, 0.4744724731445312, 0.4748072509765625, 0.47473458862304685, 0.4740843505859375, 0.4747591552734375, 0.4748912658691406, 0.4760514526367188, 0.47554150390625, 0.4754565124511719, 0.47558041381835936, 0.4751912841796875, 0.47503564453125, 0.47414990234375, 0.4742850646972656, 0.47414886474609375, 0.47481036376953123, 0.47404852294921873, 0.4742584228515625, 0.4739358825683594, 0.4744171447753906, 0.4741263427734375, 0.47414169311523435, 0.4739491882324219, 0.47383755493164065, 0.4742522888183594, 0.983693359375, 0.47566241455078123, 0.47446722412109377, 0.4746977233886719, 0.4751247253417969, 0.47474688720703123, 0.47455032348632814, 0.4745143737792969, 0.474745849609375, 0.4746485900878906, 0.4754575500488281, 0.47418881225585935, 0.474029052734375, 0.47392666625976565, 0.4740771789550781, 0.4740321350097656, 0.4739317626953125, 0.4744591369628906, 0.47454925537109377, 0.47516058349609375, 0.47410687255859374, 0.4743935852050781, 0.4761661376953125, 0.4749885559082031, 0.47419699096679685, 0.47460250854492186, 0.47660543823242185, 0.4745430908203125, 0.4743280639648437, 0.47440179443359376, 0.4743813171386719, 0.47454721069335937, 0.4742799377441406, 0.4744407043457031, 0.4743854064941406, 0.47423898315429686, 0.47517593383789064, 0.4753879089355469, 0.47501516723632814, 0.47492202758789065, 0.4744867858886719, 0.47409762573242187, 0.47553741455078125, 0.4761282653808594, 0.47627365112304687, 0.47592141723632814, 0.474745849609375, 0.47454931640625, 0.47427374267578126, 0.47429937744140627, 0.47435775756835935, 0.47429937744140627, 0.475188232421875, 0.4755568542480469, 0.4755281982421875, 0.4754483337402344, 0.4758005676269531, 0.47536639404296877, 0.4753592224121094, 0.4763494567871094, 0.47446011352539064, 0.4744376220703125, 0.4739686279296875]",tokens/s,2.074646015839662,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-14B,Qwen/Qwen1.5-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,3927.220224,12732.33408,0.0,12085.886976,11337.364992,s,10,11.005286376953126,1.1005286376953127,0.00219951912246486,1.1001186523437498,1.1034800170898438,1.1035955383300782,1.1036879553222656,"[1.1009342041015624, 1.102259765625, 1.0977261962890625, 1.099001708984375, 1.0989405517578126, 1.0975576171875, 1.0993031005859375, 1.103454345703125, 1.1023978271484376, 
1.1037110595703126]",tokens/s,232.615482443152,kWh,1.2971816278166244e-05,7.107321095700172e-06,6.386246775661464e-05,8.394160513048104e-05,tokens/kWh,3049739.156191579,MB,3927.220224,12732.33408,0.0,12085.886976,11686.79936,s,10,645.4452890625,64.54452890625001,0.006590572212624524,64.54356250000001,64.553694140625,64.5538998046875,64.5540643359375,"[64.5536484375, 64.5513828125, 64.538359375, 64.55410546875, 64.5438203125, 64.5392109375, 64.53455859375, 64.5433046875, 64.5480546875, 64.53884375]",tokens/s,0.9760703357445925,kWh,0.0007619009707371396,0.00041758989640207795,0.003727131037257969,0.004906621904397187,tokens/kWh,12839.791047184834,,s,629,654.3247843627931,1.0402619783192257,0.1307385835871094,1.0244464111328124,1.0251733154296874,1.0253723876953125,2.12362228515625,"[1.0239293212890626, 1.0247884521484374, 1.0239590454101561, 1.0238433227539063, 1.0245233154296876, 1.0249482421875, 1.0245938720703125, 1.024648193359375, 1.0243778076171874, 1.0244761962890625, 1.0242918701171875, 1.02411474609375, 1.0238392333984374, 1.02443310546875, 1.0240072021484374, 1.025123291015625, 1.0243983154296874, 1.0239702758789062, 1.0238289794921875, 1.024879638671875, 1.0246707763671874, 1.0245980224609375, 1.0249052734375, 1.0246092529296875, 1.0252646484375, 1.025154052734375, 1.0244208984375, 1.0245867919921876, 1.025153076171875, 1.0249769287109376, 1.025039306640625, 1.0247618408203125, 1.0240020751953125, 1.0246666259765624, 1.024626708984375, 1.024693359375, 1.0244617919921875, 1.024501708984375, 1.02472802734375, 1.0254991455078124, 1.024680908203125, 1.025005615234375, 1.0249237060546874, 1.0251907958984374, 1.0256405029296876, 1.0250250244140624, 1.025184814453125, 1.02483251953125, 1.024786376953125, 1.0252471923828126, 1.0244935302734375, 1.0253773193359375, 1.024436279296875, 1.025090576171875, 1.025364990234375, 1.0249923095703124, 1.0241658935546876, 1.024015380859375, 1.0247249755859376, 1.0251048583984375, 1.0238597412109376, 1.02401123046875, 2.12693603515625, 1.0248826904296875, 1.024394287109375, 1.02513671875, 1.02491650390625, 1.024427978515625, 1.02502294921875, 1.0245028076171876, 1.0245499267578124, 1.024806884765625, 1.024963623046875, 1.024541748046875, 1.02464306640625, 1.0238269653320313, 1.024679931640625, 1.025101806640625, 1.0239959106445313, 1.0239354858398437, 1.024395263671875, 1.0241566162109375, 1.0247188720703124, 1.0248836669921875, 1.0245191650390626, 1.0240450439453126, 1.024563232421875, 1.0246707763671874, 1.0240665283203125, 1.02398974609375, 1.02415869140625, 1.024153564453125, 1.0244813232421874, 1.0248365478515624, 1.0246881103515626, 1.0250660400390625, 1.024818115234375, 1.02487451171875, 1.0241402587890625, 1.0241505126953125, 1.0248140869140625, 1.0247342529296875, 1.0252052001953125, 1.024384033203125, 1.0249267578125, 1.0247813720703125, 1.0250096435546876, 1.02519091796875, 1.0286304931640624, 1.0247679443359374, 1.0250260009765626, 1.02523291015625, 1.0252830810546876, 1.024384033203125, 1.0238484497070313, 1.0244556884765625, 1.0247547607421874, 1.0238433227539063, 1.0240604248046874, 1.0238505249023437, 1.0239692993164062, 1.02425390625, 1.0245499267578124, 1.0240184326171875, 1.0239478149414063, 2.123869140625, 1.024112548828125, 1.0244178466796876, 1.024606201171875, 1.024331787109375, 1.024970703125, 1.0241341552734375, 1.0240655517578126, 1.0246256103515625, 1.024216064453125, 1.02445361328125, 1.0246973876953125, 1.02462158203125, 1.0241884765625, 1.024816162109375, 1.0244024658203126, 1.0240604248046874, 1.0241331787109376, 
1.02439013671875, 1.024101318359375, 1.02460107421875, 1.0241126708984376, 1.0239580078125, 1.0260316162109375, 1.024153564453125, 1.024626708984375, 1.025184814453125, 1.024384033203125, 1.0247044677734376, 1.0242744140625, 1.0245263671875, 1.0240440673828124, 1.0243983154296874, 1.0242017822265626, 1.025048583984375, 1.02445263671875, 1.0241658935546876, 1.024067626953125, 1.0245570068359375, 1.0243236083984375, 1.0241505126953125, 1.0240552978515625, 1.0239324340820313, 1.02377880859375, 1.0250537109375, 1.0241873779296875, 1.0242979736328126, 1.0260521240234375, 1.0243245849609375, 1.0240809326171876, 1.024226318359375, 1.02384228515625, 1.0239518432617187, 1.02460107421875, 1.0242713623046875, 1.023963134765625, 1.0244454345703125, 1.02419970703125, 1.0243572998046875, 1.025553466796875, 1.0245345458984374, 1.0243184814453126, 1.02401123046875, 2.12367041015625, 1.0241259765625, 1.0242652587890626, 1.0244208984375, 1.0240543212890625, 1.0244075927734375, 1.0240450439453126, 1.024143310546875, 1.024236572265625, 1.0248232421875, 1.0240543212890625, 1.0240615234375, 1.0252451171875, 1.0245201416015626, 1.02453857421875, 1.024142333984375, 1.024111572265625, 1.0240758056640624, 1.0245355224609376, 1.0249923095703124, 1.0250772705078126, 1.025503173828125, 1.024585693359375, 1.025353759765625, 1.0251724853515625, 1.025302490234375, 1.0245509033203124, 1.02447412109375, 1.025395751953125, 1.0254765625, 1.025158203125, 1.02477001953125, 1.025292236328125, 1.024942138671875, 1.024954345703125, 1.024868408203125, 1.025138671875, 1.0246287841796875, 1.0256414794921875, 1.025081298828125, 1.0248232421875, 1.023910888671875, 1.0245693359375, 1.0238443603515626, 1.027009521484375, 1.0242816162109376, 1.0239989624023438, 1.0243726806640625, 1.0255267333984375, 1.024779296875, 1.0242899169921875, 1.0245653076171874, 1.0248477783203125, 1.0242303466796876, 1.025016845703125, 1.024173095703125, 1.0242969970703124, 1.024927734375, 1.0247515869140624, 1.02410546875, 1.0243441162109375, 1.024101318359375, 1.024564208984375, 2.12349853515625, 1.0242017822265626, 1.024427978515625, 1.0245703125, 1.0245938720703125, 1.025349609375, 1.02506396484375, 1.02497900390625, 1.0247310791015625, 1.0254193115234376, 1.0243072509765625, 1.02436865234375, 1.02426416015625, 1.024543701171875, 1.0246932373046875, 1.0242508544921876, 1.0247291259765625, 1.0247679443359374, 1.0248099365234375, 1.0243450927734374, 1.0237440185546876, 1.0239437255859376, 1.02409619140625, 1.0248201904296874, 1.024573486328125, 1.0249298095703125, 1.0247445068359375, 1.024711669921875, 1.0242037353515625, 1.025076171875, 1.0243707275390626, 1.0241719970703125, 1.0248785400390625, 1.0241934814453124, 1.0238074951171876, 1.0240870361328125, 1.0238863525390625, 1.024194580078125, 1.0243511962890626, 1.023699951171875, 1.0239385375976562, 1.0239354858398437, 1.0245355224609376, 1.0244464111328124, 1.02446484375, 1.0242958984375, 1.024153564453125, 1.0246614990234375, 1.0249881591796874, 1.0237716674804687, 1.02404296875, 1.0248468017578125, 1.0250352783203125, 1.024362548828125, 1.0239723510742187, 1.02413623046875, 1.0246624755859375, 1.029676025390625, 1.02432568359375, 1.0238546142578124, 1.0238064575195311, 1.0237429809570313, 1.02468603515625, 2.1217158203125, 1.0237695922851562, 1.024711669921875, 1.02485498046875, 1.0252984619140626, 1.0250035400390625, 1.0246727294921876, 1.024594970703125, 1.0253424072265624, 1.024754638671875, 1.0237081298828126, 1.024232421875, 1.024865234375, 1.0245396728515626, 1.02403173828125, 1.02407275390625, 
1.02489599609375, 1.0252052001953125, 1.0250966796875, 1.023773681640625, 1.02441162109375, 1.02495947265625, 1.0239385375976562, 1.0248038330078124, 1.0244832763671874, 1.0240286865234376, 1.0238095092773438, 1.02410546875, 1.024607177734375, 1.0238873901367187, 1.0241719970703125, 1.0245919189453125, 1.0244515380859376, 1.0247874755859374, 1.0245714111328126, 1.02497998046875, 1.0246953125, 1.024564208984375, 1.02820654296875, 1.02466455078125, 1.02426220703125, 1.02436962890625, 1.0242867431640625, 1.0247608642578125, 1.023847412109375, 1.0239989624023438, 1.0239672241210938, 1.024320556640625, 1.0242447509765624, 1.02379931640625, 1.02389453125, 1.024280517578125, 1.0238443603515626, 1.0239979248046875, 1.02392626953125, 1.02389453125, 1.0241024169921875, 1.0241402587890625, 1.024089111328125, 1.0238341064453125, 1.0239702758789062, 1.0240225830078125, 1.02510595703125, 2.124275634765625, 1.02431640625, 1.0250086669921874, 1.0248448486328126, 1.0252420654296874, 1.024489501953125, 1.0244403076171875, 1.0246031494140626, 1.0247711181640624, 1.0237265625, 1.0236641235351562, 1.0239344482421875, 1.0241915283203125, 1.023867919921875, 1.02379931640625, 1.0237245483398438, 1.024006103515625, 1.023836181640625, 1.024227294921875, 1.02468603515625, 1.0239273071289063, 1.0240594482421874, 1.0241719970703125, 1.024067626953125, 1.023867919921875, 1.0238515014648437, 1.025666015625, 1.0245263671875, 1.024973876953125, 1.02382080078125, 1.0239006958007812, 1.0243861083984376, 1.0252000732421875, 1.025047607421875, 1.0245989990234374, 1.0247177734375, 1.023931396484375, 1.0238443603515626, 1.0247506103515625, 1.0237757568359376, 1.0241033935546875, 1.0239365844726562, 1.0244884033203125, 1.0237399291992189, 1.024362548828125, 1.024405517578125, 1.024089111328125, 1.0244864501953126, 1.02445263671875, 1.02447509765625, 1.024986083984375, 1.0248734130859376, 1.0243768310546875, 1.024090087890625, 1.024077880859375, 1.0242088623046874, 1.0244178466796876, 1.0243809814453124, 1.024343017578125, 1.024501708984375, 1.0247445068359375, 1.024690185546875, 1.0249298095703125, 2.128819091796875, 1.02439111328125, 1.0247823486328125, 1.0247977294921875, 1.02436767578125, 1.0251878662109375, 1.025364990234375, 1.0248714599609374, 1.024216064453125, 1.023978515625, 1.024204833984375, 1.024175048828125, 1.0241976318359376, 1.02432568359375, 1.024288818359375, 1.024332763671875, 1.02521142578125, 1.0247762451171876, 1.0244351806640626, 1.0237880249023437, 1.0244085693359375, 1.024279541015625, 1.024522216796875, 1.0245447998046875, 1.0244136962890624, 1.02485302734375, 1.0252779541015624, 1.0244249267578125, 1.0243072509765625, 1.024364501953125, 1.0249554443359374, 1.02441064453125, 1.02491748046875, 1.025059814453125, 1.0243563232421875, 1.024710693359375, 1.0254488525390626, 1.0243583984375, 1.024216064453125, 1.0244423828125, 1.024673828125, 1.0236497802734374, 1.0240399169921874, 1.02411572265625, 1.0241024169921875, 1.0244669189453126, 1.024669677734375, 1.0245672607421874, 1.0250875244140625, 1.0249993896484375, 1.024100341796875, 1.0239754028320311, 1.0242611083984374, 1.0236723022460938, 1.0240758056640624, 1.025005615234375, 1.0246123046875, 1.024385009765625, 1.024511962890625, 1.0244013671875, 1.0243123779296874, 1.0240665283203125, 1.0242939453125, 2.1268427734375, 1.0253404541015625, 1.0243931884765625, 1.0240972900390626, 1.0241934814453124, 1.0252298583984376, 1.024215087890625, 1.0247833251953125, 1.024521240234375, 1.023973388671875, 1.0242017822265626, 1.0258052978515626, 1.024546875, 
1.0249359130859375, 1.025333251953125, 1.0250526123046875, 1.02436962890625, 1.0253035888671875, 1.0246348876953124, 1.0245201416015626, 1.02516015625, 1.0252349853515625, 1.02502294921875, 1.024257080078125, 1.0243369140625, 1.0239395751953124, 1.02425390625, 1.0249779052734376, 1.024067626953125, 1.0241177978515625, 1.0244976806640624, 1.025036376953125, 1.02519189453125, 1.02483349609375, 1.0254356689453126, 1.0247762451171876, 1.024606201171875, 1.0247310791015625, 1.024310302734375, 1.0240450439453126, 1.024733154296875, 1.0244976806640624, 1.0241402587890625, 1.0240235595703124, 1.025154052734375, 1.023847412109375, 1.0244290771484375, 1.0241136474609376, 1.0241822509765626, 1.0238689575195312, 1.0249298095703125, 1.0245478515625, 1.0244771728515625, 1.0245509033203124, 1.024611328125, 1.0240921630859374, 1.024890869140625, 1.0240205078125, 1.024279541015625, 1.0245560302734376, 1.0247720947265626, 1.024206787109375, 1.0244300537109374, 2.128713623046875, 1.024427001953125, 1.0249287109375, 1.025007568359375, 1.0242989501953126, 1.025701904296875, 1.0250526123046875, 1.024421875, 1.02443115234375, 1.0251038818359375, 1.024236572265625, 1.0237388916015624, 1.0245765380859375, 1.023847412109375, 1.0237470703125, 1.0244495849609374, 1.024459716796875, 1.024206787109375, 1.02432666015625, 1.024251953125, 1.0239293212890626, 1.024359375, 1.02411669921875, 1.0238505249023437, 1.024151611328125, 1.025427490234375, 1.0240450439453126, 1.024953369140625, 1.02416796875, 1.0247802734375, 1.0241229248046875, 1.024373779296875, 1.024141357421875, 1.0242396240234375, 1.02449560546875, 1.0251766357421874, 1.024015380859375, 1.0237122802734375, 1.0239969482421876, 1.0238975830078125, 1.0237726440429689, 1.02384130859375, 1.02431640625, 1.0243133544921874, 1.0245919189453125, 1.024943115234375, 1.0246246337890625, 1.024611328125, 1.024716796875, 1.024035888671875, 1.0247413330078126, 1.024973876953125, 1.024796630859375, 1.0246318359375, 1.0259609375, 1.0238064575195311, 1.0238228759765624, 1.02377880859375, 1.02478955078125, 1.02438916015625, 1.0245723876953126, 1.0243604736328125, 1.02432568359375]",tokens/s,0.9612963088545465,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm2-20b,internlm/internlm2-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,4008.419328,15760.621568,0.0,15114.174464,14046.123008,s,10,15.848047607421876,1.5848047607421876,0.001058458314234293,1.584490295410156,1.5859812255859376,1.5864652832031252,1.5868525292968751,"[1.5851588134765624, 1.583447265625, 1.5845269775390625, 1.583828125, 1.58445361328125, 1.583693603515625, 1.5843594970703125, 1.5857567138671875, 1.5869493408203126, 
1.5858736572265626]",tokens/s,161.53409324698862,kWh,1.869280167751842e-05,1.0243654597979911e-05,8.793465368099884e-05,0.00011687110995649717,tokens/kWh,2190447.23794692,MB,4008.419328,15760.621568,0.0,15114.174464,14170.904576,s,10,927.2718203125,92.72718203125001,0.005797509915113206,92.7283046875,92.73256640625,92.73465820312501,92.736331640625,"[92.7307421875, 92.72909375, 92.73675, 92.7321015625, 92.7264921875, 92.7241015625, 92.7158671875, 92.7194609375, 92.7296953125, 92.727515625]",tokens/s,0.6794124292353493,kWh,0.0010946948910421795,0.0005999896856933628,0.005209015139431006,0.006903699716166547,tokens/kWh,9125.541751543959,,s,629,940.0627094726563,1.494535309177514,0.18825786848907494,1.47182177734375,1.4724240966796875,1.4726346923828124,3.055808037109375,"[1.4722620849609376, 1.4717921142578125, 1.47181982421875, 1.4719560546875, 1.4718125, 1.4717183837890624, 1.472183349609375, 1.47190576171875, 1.4715218505859375, 1.47171630859375, 1.47156787109375, 1.4714951171875, 1.471922119140625, 1.4721187744140625, 1.4715177001953126, 1.47153515625, 1.4718924560546875, 1.4718126220703125, 1.4714593505859375, 1.4722181396484375, 1.4722314453125, 1.4714234619140625, 1.471951904296875, 1.4719990234375, 1.471762451171875, 1.4720880126953124, 1.472605224609375, 1.471382568359375, 1.4716119384765625, 1.471868896484375, 1.4716876220703126, 1.4716446533203125, 1.472257080078125, 1.4723829345703126, 1.472047119140625, 1.4720819091796875, 1.47234716796875, 1.4716507568359376, 1.47162109375, 1.471951904296875, 1.472247802734375, 1.472111572265625, 1.471752197265625, 1.4720460205078125, 1.4718433837890625, 1.471847412109375, 1.473238037109375, 1.4716826171875, 1.4716129150390624, 1.471711181640625, 1.4720286865234375, 1.4715771484375, 1.47129443359375, 1.4723441162109374, 1.47237890625, 1.4720123291015625, 1.4720389404296874, 1.4725570068359375, 1.4715821533203124, 1.47163232421875, 1.472152587890625, 1.4719139404296875, 3.055659912109375, 1.4719190673828124, 1.4714122314453124, 1.4715750732421875, 1.4726441650390625, 1.4716497802734374, 1.4719395751953126, 1.4714327392578126, 1.4716334228515624, 1.4717982177734374, 1.4725570068359375, 1.472247802734375, 1.4715555419921875, 1.4713795166015624, 1.47167431640625, 1.4718515625, 1.471489990234375, 1.4721033935546874, 1.4721280517578126, 1.471662109375, 1.47139990234375, 1.4714696044921876, 1.471595458984375, 1.4714849853515626, 1.472489501953125, 1.4717716064453126, 1.471542236328125, 1.4713282470703124, 1.4715074462890625, 1.4714327392578126, 1.4716241455078125, 1.4727454833984375, 1.4718914794921876, 1.472322509765625, 1.4719334716796875, 1.47182177734375, 1.4713436279296874, 1.4722703857421875, 1.47228466796875, 1.471656982421875, 1.471455322265625, 1.4724454345703124, 1.4727833251953124, 1.472058349609375, 1.472762939453125, 1.4725816650390624, 1.47173583984375, 1.4722109375, 1.4722037353515625, 1.4719927978515626, 1.4716068115234375, 1.4723389892578125, 1.4720194091796874, 1.4716221923828126, 1.4717747802734376, 1.47186279296875, 1.4714490966796876, 1.471636474609375, 1.4723154296875, 1.471784912109375, 1.47171533203125, 1.4717327880859374, 1.472248779296875, 3.057259521484375, 1.47224267578125, 1.4718597412109375, 1.472489501953125, 1.4721064453125, 1.4717808837890625, 1.471899658203125, 1.4724228515625, 1.472541748046875, 1.47203173828125, 1.472416748046875, 1.47173583984375, 1.471767578125, 1.4718350830078124, 1.472779296875, 1.4719927978515626, 1.47137841796875, 1.4718525390625, 1.4718648681640625, 1.4717603759765625, 1.4714869384765625, 
1.47251611328125, 1.4720552978515624, 1.47161083984375, 1.4713856201171875, 1.4714736328125, 1.4710916748046876, 1.471134765625, 1.4725887451171875, 1.4723441162109374, 1.4716630859375, 1.471782958984375, 1.4716395263671875, 1.4718515625, 1.4722734375, 1.472278564453125, 1.4714920654296875, 1.47167431640625, 1.47209423828125, 1.4719703369140624, 1.47245263671875, 1.4724505615234376, 1.47195703125, 1.47188525390625, 1.472077880859375, 1.4723031005859375, 1.4720552978515624, 1.4719139404296875, 1.4722252197265624, 1.4726502685546874, 1.4722242431640624, 1.471540283203125, 1.472048095703125, 1.4718955078125, 1.4713743896484375, 1.47283154296875, 1.4720828857421875, 1.472143310546875, 1.472288818359375, 1.4720460205078125, 1.4718065185546876, 1.471910888671875, 1.472711669921875, 3.055857666015625, 1.471562744140625, 1.4717071533203125, 1.471541259765625, 1.4722652587890626, 1.47290625, 1.4720972900390625, 1.4717244873046875, 1.4717379150390626, 1.47239111328125, 1.4715084228515625, 1.4723399658203125, 1.47213720703125, 1.47205224609375, 1.472205810546875, 1.4718515625, 1.471974365234375, 1.4717972412109375, 1.472838623046875, 1.472647216796875, 1.47232666015625, 1.47219970703125, 1.4716876220703126, 1.4716630859375, 1.4720552978515624, 1.4729554443359374, 1.471932373046875, 1.471942626953125, 1.471614990234375, 1.4719990234375, 1.4717265625, 1.4722620849609376, 1.47253857421875, 1.47146337890625, 1.4715965576171874, 1.4714061279296875, 1.471909912109375, 1.4716488037109374, 1.4721239013671874, 1.472101318359375, 1.4714869384765625, 1.471604736328125, 1.4719661865234375, 1.4716927490234375, 1.4718228759765626, 1.471889404296875, 1.4719180908203124, 1.4718177490234374, 1.4717174072265624, 1.471542236328125, 1.471983642578125, 1.4715238037109375, 1.4717327880859374, 1.472067626953125, 1.471873046875, 1.4716876220703126, 1.4719764404296876, 1.4723194580078125, 1.4719017333984374, 1.4723870849609375, 1.4717716064453126, 1.4715146484375, 1.4714757080078125, 3.056364501953125, 1.4723338623046875, 1.472585693359375, 1.4714593505859375, 1.471646728515625, 1.4715494384765626, 1.4717880859375, 1.472016357421875, 1.4726204833984375, 1.4721822509765625, 1.4717747802734376, 1.4717244873046875, 1.4717716064453126, 1.47205322265625, 1.4717276611328125, 1.472067626953125, 1.471805419921875, 1.4715555419921875, 1.471551513671875, 1.4714962158203124, 1.47133544921875, 1.4717747802734376, 1.471994873046875, 1.47158837890625, 1.471520751953125, 1.4713466796875, 1.4714747314453125, 1.4717276611328125, 1.4718135986328125, 1.4724290771484374, 1.4716488037109374, 1.4714132080078124, 1.4716231689453125, 1.4723389892578125, 1.4713077392578124, 1.47160888671875, 1.471899658203125, 1.4723778076171874, 1.471910888671875, 1.4719447021484375, 1.47195703125, 1.47152685546875, 1.472251953125, 1.471962158203125, 1.4716077880859375, 1.471457275390625, 1.4723092041015624, 1.4719969482421875, 1.4716549072265626, 1.4718648681640625, 1.47160986328125, 1.4718760986328125, 1.471826904296875, 1.4713251953125, 1.47182177734375, 1.4716077880859375, 1.472786376953125, 1.472522216796875, 1.4720809326171875, 1.4717982177734374, 1.4721248779296876, 1.471731689453125, 1.4721033935546874, 3.055680419921875, 1.4718280029296875, 1.4716129150390624, 1.4713046875, 1.4718914794921876, 1.4720511474609375, 1.4723931884765624, 1.471605712890625, 1.4720911865234374, 1.47203173828125, 1.4718822021484375, 1.4717869873046876, 1.4723829345703126, 1.4720809326171875, 1.471552490234375, 1.4721136474609375, 1.471494140625, 1.4717767333984375, 1.471177734375, 
1.472689208984375, 1.4716497802734374, 1.4718802490234375, 1.4714306640625, 1.472184326171875, 1.4717860107421874, 1.47194677734375, 1.47276904296875, 1.4713814697265626, 1.4714398193359375, 1.471753173828125, 1.472689208984375, 1.472320556640625, 1.472069580078125, 1.47273828125, 1.4716497802734374, 1.4718289794921875, 1.4717060546875, 1.471951904296875, 1.4716866455078126, 1.47211669921875, 1.471942626953125, 1.4714388427734375, 1.471298583984375, 1.4713538818359375, 1.4712965087890626, 1.4712689208984375, 1.4717244873046875, 1.472357421875, 1.471731689453125, 1.4715218505859375, 1.471215576171875, 1.4711285400390626, 1.4713046875, 1.47230615234375, 1.4722723388671874, 1.4720819091796875, 1.471382568359375, 1.4713026123046875, 1.472006103515625, 1.47163232421875, 1.472415771484375, 1.471131591796875, 1.4713272705078124, 3.056773193359375, 1.4716077880859375, 1.4716282958984375, 1.471858642578125, 1.4712208251953125, 1.4718197021484376, 1.472443359375, 1.4715648193359374, 1.471594482421875, 1.472443359375, 1.4719036865234374, 1.47152587890625, 1.4711285400390626, 1.4714869384765625, 1.4713077392578124, 1.4713907470703125, 1.47195703125, 1.47201123046875, 1.4714542236328125, 1.4714757080078125, 1.471456298828125, 1.471595458984375, 1.47175732421875, 1.4725191650390625, 1.4716539306640626, 1.4716416015625, 1.47211474609375, 1.4713333740234376, 1.47162109375, 1.4716436767578125, 1.4722816162109376, 1.47161083984375, 1.471515625, 1.4715064697265625, 1.4715709228515625, 1.47129345703125, 1.4712586669921874, 1.4722181396484375, 1.471456298828125, 1.4713170166015626, 1.4715576171875, 1.47175732421875, 1.47146337890625, 1.472247802734375, 1.471603759765625, 1.4716273193359375, 1.4715709228515625, 1.472047119140625, 1.4721197509765624, 1.472300048828125, 1.4724617919921874, 1.472036865234375, 1.4715872802734375, 1.47150537109375, 1.47152685546875, 1.4712227783203125, 1.4709483642578125, 1.4714481201171874, 1.4717808837890625, 1.4714285888671874, 1.4712176513671875, 1.4715013427734376, 1.471331298828125, 3.058272216796875, 1.4714920654296875, 1.4712022705078125, 1.47119921875, 1.4717470703125, 1.471373291015625, 1.4719886474609376, 1.4716968994140625, 1.4716273193359375, 1.4718914794921876, 1.471751220703125, 1.4717174072265624, 1.471711181640625, 1.472184326171875, 1.472363525390625, 1.471656982421875, 1.4721873779296875, 1.4718863525390624, 1.4719764404296876, 1.4720511474609375, 1.4719754638671876, 1.4718924560546875, 1.4713037109375, 1.4712841796875, 1.47162109375, 1.4717725830078126, 1.472, 1.4716497802734374, 1.4716558837890625, 1.4713426513671874, 1.471215576171875, 1.4712484130859376, 1.47142041015625, 1.471537109375, 1.4719190673828124, 1.4712698974609375, 1.471478759765625, 1.47183203125, 1.4715013427734376, 1.4721925048828124, 1.47177978515625, 1.472447509765625, 1.4714500732421876, 1.4717501220703124, 1.47160888671875, 1.4718299560546875, 1.471974365234375, 1.4720081787109376, 1.472126953125, 1.4715023193359376, 1.4715084228515625, 1.47169482421875, 1.471330322265625, 1.471171630859375, 1.4719794921875, 1.4723297119140626, 1.471705078125, 1.4717818603515624, 1.4718740234375, 1.4721126708984376, 1.471266845703125, 1.472437255859375, 1.4715064697265625, 3.05955126953125, 1.4719764404296876, 1.47195703125, 1.47245361328125, 1.472227294921875, 1.4713221435546875, 1.4720296630859375, 1.4715545654296875, 1.4716138916015624, 1.4715606689453125, 1.47230517578125, 1.4725037841796875, 1.471837158203125, 1.47186376953125, 1.47192724609375, 1.4720911865234374, 1.47192626953125, 1.4723450927734374, 
1.4718555908203126, 1.47135791015625, 1.4716026611328126, 1.471425537109375, 1.4714224853515625, 1.4716395263671875, 1.4727669677734374, 1.4724013671875, 1.4724403076171875, 1.47211669921875, 1.4722672119140625, 1.472236572265625, 1.47209423828125, 1.4720142822265625, 1.471340576171875, 1.4719610595703125, 1.471952880859375, 1.4716343994140626, 1.4717020263671876, 1.472510009765625, 1.4722958984375, 1.47205322265625, 1.471494140625, 1.471520751953125, 1.4711224365234374, 1.4714521484375, 1.471753173828125, 1.4722447509765626, 1.4713282470703124, 1.47150341796875, 1.472290771484375, 1.471889404296875, 1.4715074462890625, 1.4724771728515624, 1.4729246826171876, 1.4722078857421874, 1.4716180419921876, 1.471309814453125, 1.47177978515625, 1.47196923828125, 1.4722120361328126, 1.471774658203125, 1.4713466796875, 1.471520751953125, 1.4712811279296876, 3.058113525390625, 1.4730526123046874, 1.4717235107421875, 1.4721668701171875, 1.471916015625, 1.472654296875, 1.4716385498046876, 1.4719088134765625, 1.4720716552734374, 1.4713763427734374, 1.471236083984375, 1.471573974609375, 1.4717041015625, 1.471826904296875, 1.4721710205078125, 1.472248779296875, 1.471709228515625, 1.47167333984375, 1.4722354736328125, 1.47183203125, 1.4724495849609376, 1.47257861328125, 1.4722344970703125, 1.471520751953125, 1.4716630859375, 1.47169384765625, 1.472069580078125, 1.4725938720703124, 1.472268310546875, 1.471952880859375, 1.4720880126953124, 1.4720225830078124, 1.471952880859375, 1.4723450927734374, 1.472373779296875, 1.472194580078125, 1.4716927490234375, 1.471921142578125, 1.471922119140625, 1.472109619140625, 1.471762451171875, 1.4723779296875, 1.4719671630859374, 1.4716590576171875, 1.47093603515625, 1.4710968017578125, 1.4713907470703125, 1.47113671875, 1.471277099609375, 1.4721495361328125, 1.4713795166015624, 1.4710025634765624, 1.471087646484375, 1.47147265625, 1.4714593505859375, 1.4727730712890625, 1.472385986328125, 1.4715115966796875, 1.47182177734375, 1.471372314453125, 1.471321044921875, 1.4717603759765625, 1.47186181640625]",tokens/s,0.6691042987470994,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,.,.,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File 
""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 370, in cached_file raise EnvironmentError( OSError: . does not appear to have a file named config.json. Checkout 'https://huggingface.co/./tree/None' for available files. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-j-6b,EleutherAI/gpt-j-6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gptj/modeling_gptj.py"", line 1124, in forward transformer_outputs = self.transformer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gptj/modeling_gptj.py"", line 950, in forward outputs = block( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gptj/modeling_gptj.py"", line 578, in forward attn_outputs = self.attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gptj/modeling_gptj.py"", line 317, in forward query = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-7.5B,facebook/xglm-7.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: XGLMForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmp5zh7p2gh/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-7b,tiiuae/falcon-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: FalconForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmp1zss8rr4/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/polyglot-ko-12.8b,EleutherAI/polyglot-ko-12.8b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl 
return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 327, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciCoder-1b,Deci/DeciCoder-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) TypeError: DeciCoderAttention.forward() got an unexpected keyword argument 'cache_position' ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,m,m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for 
url: https://huggingface.co/m/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2baa-0d4bc9a14d0e765a712ce7c0;6379e16e-ce33-42f6-a673-78358d724427) Repository Not Found for url: https://huggingface.co/m/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: m is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-1.8B,Qwen/Qwen1.5-1.8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return 
func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 344, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-MoE-A2.7B,Qwen/Qwen1.5-MoE-A2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3640, in from_pretrained hf_quantizer.preprocess_model( File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 182, in preprocess_model return self._process_model_before_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 85, in _process_model_before_weight_loading model, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( [Previous line repeated 1 more time] File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 165, in replace_with_awq_linear model._modules[name] = target_cls( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 46, in __init__ assert out_features % (32 // self.w_bit) == 0 AssertionError 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,v,v,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/v/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a33ce-77047b4d1c965421168572bd;44ec6097-6a20-4eed-868c-c31320a83440) Repository Not Found for url: https://huggingface.co/v/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: v is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-110B,Qwen/Qwen1.5-110B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-2-12b,stabilityai/stablelm-2-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 1158, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 1035, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 759, in forward self_attn_output, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 535, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.3b,EleutherAI/pythia-1.3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 327, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-7b-hf,meta-llama/Llama-2-7b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) 
File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 416, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciLM-7B,Deci/DeciLM-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1914, in generate result = self._sample( File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2651, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/Deci/DeciLM-7B/c3c9f4226801dc0433f32aebffe0aac68ee2f051/transformers_v4_35_2__modeling_llama.py"", line 1034, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/Deci/DeciLM-7B/c3c9f4226801dc0433f32aebffe0aac68ee2f051/modeling_decilm.py"", line 274, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/Deci/DeciLM-7B/c3c9f4226801dc0433f32aebffe0aac68ee2f051/transformers_v4_35_2__modeling_llama.py"", line 672, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/Deci/DeciLM-7B/c3c9f4226801dc0433f32aebffe0aac68ee2f051/modeling_decilm.py"", line 84, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-0.5B,Qwen/Qwen1.5-0.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 344, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,0,0,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/0/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", 
line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3426-40cbfabf47a123460812e7e0;edc4e491-70b2-424b-bb3b-e416e51eed8e) Repository Not Found for url: https://huggingface.co/0/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 0 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-70b-hf,meta-llama/Llama-2-70b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,2,2,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/2/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client 
Error. (Request ID: Root=1-667a32ca-1ae44d8c39eabc1a2d5dadf3;2ffeeb6f-b2c9-4d86-a119-08c4b8cf8af5) Repository Not Found for url: https://huggingface.co/2/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 2 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-2.7b,facebook/opt-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 1117, in forward outputs = self.model.decoder( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 883, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 524, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 291, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,l,l,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/l/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e 
huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a311a-3b7633db0e9c69062f17ef22;186a2c83-9acf-4607-99e3-6ebbc96fe365) Repository Not Found for url: https://huggingface.co/l/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: l is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Meta-Llama-3-70B,meta-llama/Meta-Llama-3-70B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", 
line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-32B,Qwen/Qwen1.5-32B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 344, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-72B,Qwen/Qwen2-beta-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, 
**device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,s,s,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/s/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2c50-2eb803555d6831f30dea6918;e921ef2f-867e-4634-9192-5a149e800883) Repository Not Found for url: https://huggingface.co/s/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: s is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,B,B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/B/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a3320-0826f6fb5d6e69e025740b6e;84678bbe-5790-4144-8a80-1a4390042e6e) Repository Not Found for url: https://huggingface.co/B/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: B is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-14B,Qwen/Qwen2-beta-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 344, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,mistralai/Mistral-7B-v0.1,mistralai/Mistral-7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 1139, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 1024, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, 
**kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 738, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 344, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-16B-nl,Salesforce/codegen-16B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config 
= cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: CodeGenForCausalLM does not support Flash Attention 2.0 yet. Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpcyeyok3d/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-2.7b,EleutherAI/pythia-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return 
self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 327, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,a,a,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/a/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e 
huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a30c4-4ed1e94453f390846c705788;13f7bcbe-43b9-4511-9dc7-7c5bd59eb1eb) Repository Not Found for url: https://huggingface.co/a/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: a is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-2.7B,EleutherAI/gpt-neo-2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, 
in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 971, in forward transformer_outputs = self.transformer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 839, in forward outputs = block( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 566, in forward attn_outputs = self.attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 518, in forward return self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 319, in forward query = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return 
func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-40b,tiiuae/falcon-40b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: FalconForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpkpnyhzs2/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,1,1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/1/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) 
File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a34d2-17e3c9500f2baa52463c18bf;99ecb0c0-9208-434a-9ca2-b3083103a838) Repository Not Found for url: https://huggingface.co/1/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 1 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-base-alpha-3b,stabilityai/stablelm-base-alpha-3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 
66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 327, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,t,t,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/t/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e 
huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2fff-0d72cc874610617f68ed0897;9e0eae72-e432-48ee-ba3b-d9ae691a2b18) Repository Not Found for url: https://huggingface.co/t/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: t is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neox-20b,EleutherAI/gpt-neox-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, 
in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 327, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,/,/,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 370, in cached_file raise EnvironmentError( OSError: / does not appear to have a file named config.json. Checkout 'https://huggingface.co///tree/None' for available files. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-2b,google/recurrentgemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc3c4-311a3efc523c112d0dc99985;6d4b7215-dfc0-4844-9a95-b909726a1a23) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/recurrentgemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-72B,Qwen/Qwen1.5-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,-,-,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 106, in _inner_fn validate_repo_id(arg_value) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 160, in validate_repo_id raise HFValidationError( huggingface_hub.errors.HFValidationError: Repo id must use alphanumeric chars or '-', '_', '.', '--' and '..' are forbidden, '-' and '.' cannot start or end the name, max length is 96: '-'. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 463, in cached_file raise EnvironmentError( OSError: Incorrect path_or_model_id: '-'. Please provide either the path to a local folder or the repo_id of a model on the Hub. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-6B-nl,Salesforce/codegen-6B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: CodeGenForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmp3ie8o3yu/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-7b,google/recurrentgemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667cc418-24a222036948bfc7704d6c85;6a496dec-6711-4676-ae6e-9d82b077c128) Repository Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: google/recurrentgemma-7b is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-13b,huggyllama/llama-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", 
line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 416, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-2b,google/gemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc318-1e6a0ed26c94f72f3803c5c2;effcf1ef-eb41-4154-98f4-c0e6214edc1b) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/gemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-7B,Qwen/Qwen-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-13b-hf,meta-llama/Llama-2-13b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 416, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-4B,Qwen/Qwen1.5-4B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 344, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-410m,EleutherAI/pythia-410m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 327, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-7b,huggyllama/llama-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 416, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-125m,EleutherAI/gpt-neo-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 971, in forward transformer_outputs = self.transformer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 839, in forward outputs = block( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, 
**kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 566, in forward attn_outputs = self.attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 518, in forward return self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 319, in forward query = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,microsoft/phi-1_5,microsoft/phi-1_5,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 
369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/phi/modeling_phi.py"", line 1166, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/phi/modeling_phi.py"", line 1045, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/phi/modeling_phi.py"", line 776, in forward attn_outputs, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/phi/modeling_phi.py"", line 433, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-14B,Qwen/Qwen-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-65b,huggyllama/llama-65b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 86.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-7b,google/gemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc36d-3bf8cf111a38173958cdc9b1;0dc3a79f-9179-499c-add6-5d102d59e591) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/gemma-7b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-7b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,i,i,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/i/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a2f52-17b8f18b22d818f408e3a139;b9a2bb2c-f0bb-490a-96d9-0cbc6d7e6a8b) Repository Not Found for url: https://huggingface.co/i/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: i is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-6.7b,facebook/opt-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 1117, in forward outputs = self.model.decoder( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 883, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 524, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 291, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-34B,01-ai/Yi-34B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1914, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2651, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1174, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 978, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 718, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 414, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-160m,EleutherAI/pythia-160m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 327, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,M,M,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/M/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e 
huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2efb-38c1577d6f3df62e1147d3a2;be9959a3-a2c6-454b-87fb-15ca2a6a8446) Repository Not Found for url: https://huggingface.co/M/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: M is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,openai-community/gpt2-large,openai-community/gpt2-large,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.1,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1237.069824,2645.03296,0.0,1998.585856,1692.285952,s,10,0.19134716606140137,0.019134716606140138,0.0005608761432091296,0.018977791786193847,0.0197670597076416,0.02017293758392334,0.02049763988494873,"[0.02057881546020508, 0.0190994873046875, 0.0187042236328125, 0.018728607177734374, 0.01878009605407715, 0.018654111862182618, 0.01891535949707031, 0.019040224075317382, 0.019169376373291015, 
0.019676864624023436]",tokens/s,13378.823698797409,kWh,2.1992317273112132e-07,1.2050741516561284e-07,6.735971662309277e-07,1.014027754127662e-06,tokens/kWh,252458573.2076231,MB,1237.069824,2645.03296,0.0,1998.585856,1740.085248,s,10,11.554657470703125,1.1554657470703125,0.013645969689330972,1.1534767456054689,1.1682131958007813,1.1769882507324219,1.1840082946777344,"[1.1857633056640624, 1.14386572265625, 1.1522501220703125, 1.1418011474609375, 1.154703369140625, 1.1461051025390625, 1.1383001708984375, 1.1644542236328126, 1.16626318359375, 1.161151123046875]",tokens/s,54.52346827219822,kWh,1.387793931927391e-05,7.604759108500891e-06,2.8986009894769086e-05,5.0468708322543894e-05,tokens/kWh,1248298.244475944,,s,629,11.705972766876219,0.0186104495498827,0.0023066301847454542,0.018160640716552736,0.01886699562072754,0.01908162498474121,0.03717550079345703,"[0.019216384887695313, 0.018964479446411133, 0.018840576171875, 0.01904947280883789, 0.01902387237548828, 0.019145727157592773, 0.01901568031311035, 0.018939903259277344, 0.018876415252685547, 0.018917375564575196, 0.019091455459594727, 0.019136512756347656, 0.018971647262573242, 0.018998271942138673, 0.019066879272460938, 0.018921472549438476, 0.018934783935546876, 0.019094528198242186, 0.019323904037475585, 0.019158016204833983, 0.01887027168273926, 0.01883955192565918, 0.01860095977783203, 0.019615743637084963, 0.019579904556274414, 0.019698720932006836, 0.018974687576293944, 0.018787328720092773, 0.01881292724609375, 0.01901055908203125, 0.018912256240844725, 0.018939903259277344, 0.018815999984741212, 0.01886617660522461, 0.018852863311767578, 0.018775039672851563, 0.0186746883392334, 0.018123775482177733, 0.018050048828125, 0.018131967544555663, 0.018062335968017578, 0.018153472900390624, 0.018135040283203126, 0.01808076858520508, 0.018112512588500978, 0.018164735794067383, 0.018568191528320312, 0.01880166435241699, 0.018711551666259766, 0.018769920349121092, 0.01878937530517578, 0.018946048736572265, 0.018766847610473633, 0.01879347229003906, 0.0188221435546875, 0.01882111930847168, 0.01923788833618164, 0.01900748825073242, 0.018765823364257812, 0.018397184371948243, 0.01823232078552246, 0.018239488601684572, 0.03753472137451172, 0.018603008270263673, 0.018795520782470702, 0.018242559432983398, 0.01817087936401367, 0.018142208099365235, 0.01823232078552246, 0.018128896713256838, 0.018149375915527344, 0.018166784286499024, 0.01822719955444336, 0.018189311981201172, 0.0182108154296875, 0.018174976348876954, 0.018280448913574218, 0.01822822380065918, 0.01816166305541992, 0.018155519485473632, 0.018177024841308592, 0.01824563217163086, 0.018111520767211915, 0.018137056350708006, 0.018149375915527344, 0.018100223541259765, 0.01813811111450195, 0.01783500862121582, 0.01787494468688965, 0.017894399642944335, 0.017953792572021485, 0.017885183334350584, 0.017896448135375977, 0.017854463577270507, 0.01788313674926758, 0.017912832260131836, 0.017934335708618163, 0.018101247787475586, 0.01818726348876953, 0.018156543731689453, 0.018149375915527344, 0.018177024841308592, 0.01805619239807129, 0.01822003173828125, 0.018223104476928712, 0.018168832778930662, 0.018167808532714845, 0.018589696884155273, 0.018256895065307616, 0.018077695846557617, 0.018156543731689453, 0.018137088775634767, 0.018282495498657226, 0.018156543731689453, 0.018117631912231445, 0.01825484848022461, 0.018134016036987305, 0.018223104476928712, 0.018164735794067383, 0.018197504043579102, 0.01816985511779785, 0.018124799728393554, 0.018155519485473632, 0.018290687561035156, 
0.018130943298339842, 0.037168128967285156, 0.01818009567260742, 0.018239551544189454, 0.018205631256103517, 0.018103296279907227, 0.018289663314819335, 0.018159616470336915, 0.018160640716552736, 0.01822208023071289, 0.01824563217163086, 0.018258943557739257, 0.0182794246673584, 0.01820569610595703, 0.01816268730163574, 0.01822822380065918, 0.018748416900634765, 0.01879859161376953, 0.018307071685791015, 0.018050048828125, 0.01819340705871582, 0.01815449523925781, 0.01820569610595703, 0.01810534477233887, 0.0184268798828125, 0.018166784286499024, 0.018199552536010744, 0.018164735794067383, 0.01816268730163574, 0.018464767456054687, 0.018880512237548826, 0.01871670341491699, 0.01874940872192383, 0.0186562557220459, 0.018355199813842774, 0.018729984283447267, 0.018807807922363282, 0.01882009506225586, 0.018563072204589845, 0.018144256591796876, 0.01816985511779785, 0.018226175308227538, 0.018223104476928712, 0.01814630317687988, 0.018119680404663087, 0.018130943298339842, 0.018096128463745118, 0.018131967544555663, 0.018359296798706053, 0.018117631912231445, 0.018168832778930662, 0.018172927856445312, 0.018096128463745118, 0.018293760299682618, 0.01820057678222656, 0.018135040283203126, 0.018155519485473632, 0.018283519744873047, 0.01820057678222656, 0.018164735794067383, 0.01820364761352539, 0.018145280838012694, 0.01822412872314453, 0.01813811111450195, 0.03717836761474609, 0.01821388816833496, 0.018144256591796876, 0.018122751235961913, 0.018102272033691406, 0.018163711547851562, 0.018121728897094725, 0.018184192657470705, 0.018119680404663087, 0.01819545555114746, 0.01816166305541992, 0.018160640716552736, 0.018266111373901366, 0.01821696090698242, 0.018255872726440428, 0.018323455810546875, 0.018198528289794923, 0.01815449523925781, 0.018103296279907227, 0.018280448913574218, 0.018265087127685545, 0.018174976348876954, 0.018153472900390624, 0.018174976348876954, 0.018192384719848635, 0.018147327423095702, 0.018134016036987305, 0.018181119918823242, 0.018050048828125, 0.01806438446044922, 0.018141183853149414, 0.018076671600341796, 0.01815760040283203, 0.018163679122924804, 0.018155519485473632, 0.018151424407958985, 0.01803468894958496, 0.01807360076904297, 0.018167808532714845, 0.018008064270019532, 0.017787904739379884, 0.017830911636352538, 0.017772544860839845, 0.01799782371520996, 0.018382848739624022, 0.018126848220825196, 0.017991680145263672, 0.01817087936401367, 0.018077695846557617, 0.018139135360717772, 0.018092031478881835, 0.018158592224121094, 0.018059263229370116, 0.018089984893798827, 0.018041856765747072, 0.018061311721801757, 0.018103296279907227, 0.018142208099365235, 0.018017280578613282, 0.018050048828125, 0.018076671600341796, 0.01803468894958496, 0.017994752883911135, 0.03722649765014648, 0.018111488342285157, 0.01818623924255371, 0.017994752883911135, 0.01804800033569336, 0.01926246452331543, 0.019359743118286133, 0.01949286460876465, 0.01882316780090332, 0.01883033561706543, 0.018815999984741212, 0.019165184020996092, 0.018874368667602538, 0.01860915184020996, 0.01880166435241699, 0.018739200592041014, 0.018692096710205077, 0.018678783416748047, 0.018107391357421874, 0.018124799728393554, 0.018082815170288084, 0.018174976348876954, 0.018091007232666014, 0.018122751235961913, 0.018191360473632814, 0.018147327423095702, 0.018148351669311523, 0.018145280838012694, 0.01804287910461426, 0.018082815170288084, 0.017983488082885742, 0.01805721664428711, 0.018076671600341796, 0.018067455291748045, 0.018149375915527344, 0.018094079971313477, 0.018135040283203126, 
0.01814630317687988, 0.018142208099365235, 0.018115583419799804, 0.018166784286499024, 0.018386943817138672, 0.019382272720336914, 0.018767871856689454, 0.018694143295288086, 0.01863270378112793, 0.01861427116394043, 0.018083839416503905, 0.018103296279907227, 0.018050048828125, 0.018113536834716795, 0.018058240890502928, 0.01802342414855957, 0.01777663993835449, 0.017921024322509766, 0.01782681655883789, 0.017921024322509766, 0.01846784019470215, 0.018112512588500978, 0.018089984893798827, 0.018061311721801757, 0.018089984893798827, 0.01805516815185547, 0.036992000579833983, 0.018264064788818358, 0.0181790714263916, 0.01810534477233887, 0.018128896713256838, 0.018123775482177733, 0.018157567977905274, 0.018098175048828127, 0.01805516815185547, 0.018114559173583983, 0.01809715270996094, 0.01816166305541992, 0.018075647354125975, 0.018110464096069336, 0.018122751235961913, 0.018165760040283203, 0.01822208023071289, 0.018057247161865235, 0.018113504409790038, 0.01807360076904297, 0.018062335968017578, 0.018144256591796876, 0.01813811111450195, 0.018101247787475586, 0.018135040283203126, 0.018324480056762696, 0.018190336227416993, 0.018036735534667968, 0.017916927337646483, 0.017898496627807618, 0.01784012794494629, 0.01798454475402832, 0.018193376541137694, 0.018103328704833985, 0.018072544097900392, 0.018150400161743165, 0.01800601577758789, 0.01804595184326172, 0.01846784019470215, 0.02028441619873047, 0.019355648040771483, 0.018916351318359375, 0.019009536743164062, 0.01843404769897461, 0.018092031478881835, 0.017987583160400392, 0.018111488342285157, 0.01802444839477539, 0.01803775978088379, 0.018012191772460936, 0.018046943664550782, 0.018084863662719726, 0.018102272033691406, 0.018140159606933593, 0.018076671600341796, 0.01803878402709961, 0.018139135360717772, 0.018066432952880858, 0.018126848220825196, 0.018110464096069336, 0.018114559173583983, 0.018098175048828127, 0.01814630317687988, 0.037294078826904296, 0.018145280838012694, 0.018086912155151368, 0.018075647354125975, 0.018165760040283203, 0.018132991790771484, 0.018043903350830077, 0.018018304824829103, 0.018028543472290038, 0.01806540870666504, 0.018144256591796876, 0.018074623107910158, 0.018058240890502928, 0.0180316162109375, 0.01807974433898926, 0.018182144165039063, 0.018156543731689453, 0.017967103958129883, 0.01810534477233887, 0.018052095413208007, 0.018083839416503905, 0.017984512329101563, 0.01803878402709961, 0.018317312240600587, 0.018141183853149414, 0.01800396728515625, 0.018159616470336915, 0.018051071166992186, 0.017992704391479493, 0.01800499153137207, 0.01804287910461426, 0.01803059196472168, 0.01802649688720703, 0.017954816818237306, 0.018076671600341796, 0.018121728897094725, 0.01807257652282715, 0.018082815170288084, 0.01803059196472168, 0.01807974433898926, 0.017958911895751953, 0.01796505546569824, 0.01797532844543457, 0.018025440216064455, 0.01803878402709961, 0.01806540870666504, 0.01796505546569824, 0.018125823974609375, 0.018075647354125975, 0.018061311721801757, 0.0180316162109375, 0.018134016036987305, 0.018086912155151368, 0.018164735794067383, 0.01799679946899414, 0.01802137565612793, 0.01804083251953125, 0.017947647094726564, 0.018076671600341796, 0.018101247787475586, 0.018081792831420897, 0.018067455291748045, 0.018050079345703126, 0.037448673248291015, 0.01804595184326172, 0.01804287910461426, 0.01805721664428711, 0.018019327163696287, 0.017979391098022462, 0.01803878402709961, 0.01813811111450195, 0.018076671600341796, 0.01804287910461426, 0.018136064529418947, 0.01821286392211914, 
0.01808076858520508, 0.01781862449645996, 0.017942527770996093, 0.017855487823486327, 0.018158592224121094, 0.01843507194519043, 0.018691072463989256, 0.018387968063354493, 0.018655231475830078, 0.018695167541503906, 0.018689023971557618, 0.01859891128540039, 0.018722816467285155, 0.018726911544799805, 0.018397184371948243, 0.018044927597045898, 0.018075647354125975, 0.018089984893798827, 0.01805721664428711, 0.018367488861083983, 0.01881907272338867, 0.018697216033935548, 0.01864499282836914, 0.01866649627685547, 0.01862860870361328, 0.018646015167236327, 0.01884160041809082, 0.021174272537231444, 0.019122175216674805, 0.018735103607177735, 0.01868083190917969, 0.01863167953491211, 0.018770944595336913, 0.01869004821777344, 0.018700288772583007, 0.018790399551391602, 0.018677759170532226, 0.01881395149230957, 0.01878937530517578, 0.018729984283447267, 0.018126848220825196, 0.018069503784179687, 0.018150400161743165, 0.018163711547851562, 0.018404352188110353, 0.01885593605041504, 0.018689023971557618, 0.018748416900634765, 0.018749439239501953, 0.018803712844848632, 0.018762752532958983, 0.037664768218994144, 0.01887539291381836, 0.018889728546142577, 0.018763776779174804, 0.01877507209777832, 0.0187412166595459, 0.018119680404663087, 0.01802342414855957, 0.018107391357421874, 0.018082815170288084, 0.018150400161743165, 0.018117631912231445, 0.018062335968017578, 0.018276351928710938, 0.018740224838256835, 0.01885798454284668, 0.01875660705566406, 0.018753536224365236, 0.018559999465942383, 0.01843302345275879, 0.018131967544555663, 0.01803878402709961, 0.018724863052368163, 0.018746368408203123, 0.018697216033935548, 0.018694143295288086, 0.01867263984680176, 0.018733055114746093, 0.018939903259277344, 0.019005439758300782, 0.018803712844848632, 0.018717695236206054, 0.01846886444091797, 0.018082815170288084, 0.018060287475585936, 0.01808793640136719, 0.01844428825378418, 0.018759679794311524, 0.018736127853393555, 0.0185743350982666, 0.018044927597045898, 0.01803264045715332, 0.01802444839477539, 0.01799884796142578, 0.018070528030395508, 0.01820364761352539, 0.018120704650878908, 0.01862758445739746, 0.01864192008972168, 0.018712575912475587, 0.018655231475830078, 0.018743295669555664, 0.01866547203063965, 0.01861734390258789, 0.0187238712310791, 0.018577375411987306, 0.01866035270690918, 0.018686975479125977, 0.01861324882507324, 0.01904742431640625, 0.018710527420043945, 0.018672672271728516, 0.018756576538085937, 0.03872665786743164, 0.0188272647857666, 0.018716672897338867, 0.01861529541015625, 0.018856000900268555, 0.01869100761413574, 0.018774015426635742, 0.0186746883392334, 0.018679807662963867, 0.01840947151184082, 0.017886207580566405, 0.017796096801757814, 0.01823539161682129, 0.018374656677246092, 0.018529279708862305, 0.018464767456054687, 0.018569215774536133, 0.01862348747253418, 0.019160064697265625, 0.01885593605041504, 0.01883545684814453, 0.018778112411499022, 0.018763776779174804, 0.018713600158691408, 0.018767871856689454, 0.018744319915771485, 0.01897881507873535, 0.02040934371948242, 0.020281343460083007, 0.018932735443115235, 0.018964479446411133, 0.01883852767944336, 0.018106367111206053, 0.018100223541259765, 0.018060287475585936, 0.018084863662719726, 0.01822003173828125, 0.018050048828125, 0.018067455291748045, 0.01802444839477539, 0.018092031478881835, 0.0180633602142334, 0.018075647354125975, 0.018059263229370116, 0.01801215934753418, 0.018120704650878908, 0.018118656158447266, 0.018052095413208007, 0.018059263229370116, 0.01803264045715332, 
0.01808076858520508, 0.018027519226074217, 0.018068479537963866, 0.018044927597045898, 0.018103296279907227, 0.018051071166992186, 0.018059263229370116, 0.018085887908935547, 0.018165760040283203, 0.018076671600341796, 0.018082815170288084, 0.018075647354125975, 0.018073631286621095]",tokens/s,53.73325331661872,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-70m,EleutherAI/pythia-70m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return 
self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 327, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm-20b,internlm/internlm-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 976, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 866, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 583, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 411, in forward query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Meta-Llama-3-8B,meta-llama/Meta-Llama-3-8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, 
**kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 416, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-350m,facebook/opt-350m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 1117, in forward outputs = self.model.decoder( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 835, in forward inputs_embeds = self.project_in(inputs_embeds) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-rw-1b,tiiuae/falcon-rw-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained 
self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: FalconForCausalLM does not support Flash Attention 2.0 yet. Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpid_tdu0g/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-125m,facebook/opt-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in 
_call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 1117, in forward outputs = self.model.decoder( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 883, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 524, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 291, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-4.5B,facebook/xglm-4.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: XGLMForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpk78f_iz6/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,x,x,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/x/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) 
File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3220-405946200837c2fd0253c608;7c4b001a-b5d1-42a5-a691-77e9e0abfe63) Repository Not Found for url: https://huggingface.co/x/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: x is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-3b-4e1t,stabilityai/stablelm-3b-4e1t,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch 
raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 1158, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 1035, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 759, in forward self_attn_output, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 535, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-2-1_6b,stabilityai/stablelm-2-1_6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 1158, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 1035, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return 
forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 759, in forward self_attn_output, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 535, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-66b,facebook/opt-66b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in 
from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 162.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-180B,tiiuae/falcon-180B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/tiiuae/falcon-180B/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in 
_request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667794ac-026f58d02ee2752a44837128;a686c540-fb19-4023-9c7d-70844e6a7e64) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/tiiuae/falcon-180B/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like tiiuae/falcon-180B is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-1.3B,EleutherAI/gpt-neo-1.3B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 971, in forward transformer_outputs = self.transformer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 839, in forward outputs = block( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 566, in forward attn_outputs = self.attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 518, in forward return self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 319, in forward query = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-6.7b,EleutherAI/pythia-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in 
generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 327, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-7B,Qwen/Qwen1.5-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 344, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.4b,EleutherAI/pythia-1.4b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 327, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-13b,facebook/opt-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 1117, in forward outputs = self.model.decoder( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 883, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 524, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 291, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,8,8,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/8/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 
1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a31c9-6bd1844149a1852a5856e3be;c0f852d3-435b-498e-b1b5-98b956b57208) Repository Not Found for url: https://huggingface.co/8/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 8 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-72B,Qwen/Qwen-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,databricks/dbrx-base,databricks/dbrx-base,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cbe7f-5896980c7b7340170d4b7919;93312c1c-f429-476d-b6f0-0b2be7a671da) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like databricks/dbrx-base is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,microsoft/rho-math-1b-v0.1,microsoft/rho-math-1b-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 416, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,openai-community/gpt2,openai-community/gpt2,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1213.62432,1005.060096,0.0,358.612992,318.913024,s,23,0.17213225698471069,0.007484011173248292,0.0002954761638434772,0.00736678409576416,0.007821849536895752,0.007996079683303832,0.008394954490661622,"[0.008502976417541504, 0.007853087902069091, 0.007601344108581543, 0.007378464221954346, 0.007268896102905273, 0.007655648231506348, 0.007272704124450684, 0.0072911038398742675, 0.007318528175354004, 0.007696896076202392, 0.007447904109954834, 0.007239327907562256, 0.0075630397796630855, 0.00736678409576416, 0.00744265604019165, 0.0073199357986450195, 0.0072409920692443844, 0.007361631870269775, 0.008011967658996581, 0.007415616035461426, 0.007298208236694336, 0.007331200122833252, 0.007253344058990478]",tokens/s,34206.25571953657,kWh,8.567134822637681e-08,4.694379339333844e-08,1.8252431812478173e-07,3.1513945974449696e-07,tokens/kWh,812338766.4862884,MB,1213.919232,1005.060096,0.0,358.612992,328.804864,s,23,10.179620391845702,0.44259219094981317,0.01146519580496095,0.440910888671875,0.4509195068359375,0.45184952392578126,0.47989753540039065,"[0.48779537963867187, 0.448875732421875, 0.44378817749023436, 0.45189608764648437, 0.4514304504394531, 0.44095208740234376, 0.4469196472167969, 0.44514892578125, 0.4451697692871094, 0.4480313720703125, 0.43288784790039064, 0.4424725341796875, 0.43655374145507814, 0.4358661193847656, 0.43621551513671875, 0.43264309692382813, 0.43890673828125, 0.4397151184082031, 0.440910888671875, 0.43340951538085937, 0.4314309692382812, 
0.4308625793457031, 0.43773809814453124]",tokens/s,142.34322540757108,kWh,5.0166983791335025e-06,2.748945515611113e-06,8.752610144789825e-06,1.6518254039534438e-05,tokens/kWh,3813962.4108708547,,s,1448,10.335379461288454,0.007137692998127384,0.0009868185197434377,0.006937600135803222,0.007357337760925293,0.007660748958587645,0.014534717473983765,"[0.008195072174072266, 0.00812339210510254, 0.00801587200164795, 0.008057855606079101, 0.008011775970458984, 0.007906303882598878, 0.00791756820678711, 0.007833600044250488, 0.007880703926086426, 0.007682047843933106, 0.007670783996582031, 0.007756800174713135, 0.007795711994171142, 0.007764992237091065, 0.007696383953094482, 0.007667712211608887, 0.0077578239440917966, 0.00774348783493042, 0.007739391803741455, 0.007702527999877929, 0.007682047843933106, 0.007706624031066894, 0.007713791847229004, 0.007598144054412842, 0.007613376140594482, 0.007663616180419922, 0.007570432186126709, 0.007678976058959961, 0.007840767860412597, 0.00774451208114624, 0.007902207851409913, 0.007857151985168457, 0.007699456214904785, 0.007651328086853027, 0.0077608962059021, 0.007738368034362793, 0.007944191932678223, 0.007785535812377929, 0.007604159832000732, 0.007642111778259277, 0.007798783779144287, 0.00800153636932373, 0.007988224029541016, 0.007651328086853027, 0.007670783996582031, 0.007618559837341309, 0.007618559837341309, 0.0076871681213378906, 0.007786496162414551, 0.007779327869415284, 0.007712768077850342, 0.0077209601402282715, 0.007654399871826172, 0.00759500789642334, 0.007616511821746826, 0.007543807983398438, 0.007494656085968018, 0.007342080116271973, 0.0075345921516418455, 0.007566336154937744, 0.0072427520751953125, 0.007305215835571289, 0.015635456085205078, 0.007311359882354736, 0.007363584041595459, 0.007344128131866455, 0.0074403839111328125, 0.007830527782440186, 0.008690688133239746, 0.008432703971862793, 0.00839571189880371, 0.007342112064361572, 0.007289824008941651, 0.007284736156463623, 0.007127039909362793, 0.007045119762420654, 0.007054336071014404, 0.007201791763305664, 0.006994944095611572, 0.006974463939666748, 0.006927360057830811, 0.007143424034118652, 0.0069550080299377445, 0.006974463939666748, 0.00693555212020874, 0.006971392154693603, 0.0069816322326660156, 0.006960159778594971, 0.006988768100738525, 0.0069632000923156735, 0.006927360057830811, 0.00689356803894043, 0.007094272136688232, 0.006958079814910889, 0.0069621758460998535, 0.006900735855102539, 0.006949888229370117, 0.006958144187927246, 0.006945727825164795, 0.006938623905181885, 0.006916128158569336, 0.00694268798828125, 0.007147520065307617, 0.007170048236846924, 0.006980607986450196, 0.006986752033233643, 0.0069816322326660156, 0.006958079814910889, 0.007050240039825439, 0.006980607986450196, 0.006947840213775635, 0.00692633581161499, 0.006940671920776367, 0.007095295906066895, 0.006964223861694336, 0.00694374418258667, 0.006953983783721924, 0.006959104061126709, 0.006947840213775635, 0.006913023948669434, 0.0069550080299377445, 0.006953983783721924, 0.006941696166992187, 0.007145472049713135, 0.007001088142395019, 0.014993408203125, 0.00709222412109375, 0.006938623905181885, 0.006882304191589355, 0.006881279945373535, 0.006842368125915528, 0.006936575889587402, 0.006913023948669434, 0.006815743923187256, 0.006918144226074219, 0.006865920066833496, 0.006867968082427979, 0.0068577280044555666, 0.006858751773834228, 0.006863872051239014, 0.006875135898590088, 0.006853631973266602, 0.007525375843048096, 0.007489535808563232, 0.007001088142395019, 0.00703385591506958, 
0.00728166389465332, 0.007162879943847656, 0.007536640167236328, 0.007039999961853028, 0.007048192024230957, 0.007231488227844239, 0.0069283838272094726, 0.006907904148101806, 0.0068055038452148435, 0.006906879901885986, 0.0068986878395080565, 0.0068249602317810056, 0.006871039867401123, 0.006906879901885986, 0.006820864200592041, 0.006870016098022461, 0.006823935985565186, 0.006914048194885254, 0.006959136009216309, 0.0072754878997802735, 0.007408639907836914, 0.007152703762054444, 0.007389120101928711, 0.007087103843688965, 0.007016448020935059, 0.007097343921661377, 0.007047167778015137, 0.0071157760620117185, 0.007316480159759522, 0.00704204797744751, 0.006982656002044678, 0.007080959796905518, 0.007049215793609619, 0.007058432102203369, 0.00704307222366333, 0.006999040126800537, 0.006974463939666748, 0.007352320194244385, 0.007324672222137451, 0.007363584041595459, 0.007136256217956543, 0.007076863765716553, 0.014966815948486327, 0.007409696102142334, 0.007022528171539306, 0.00738099193572998, 0.00745472002029419, 0.007050240039825439, 0.007060480117797851, 0.006982656002044678, 0.007408639907836914, 0.007277567863464355, 0.007288832187652588, 0.007314432144165039, 0.007117824077606201, 0.0073471999168395995, 0.007028736114501953, 0.007010303974151612, 0.007296000003814697, 0.00727347183227539, 0.007276576042175293, 0.007039968013763428, 0.0069621758460998535, 0.0071905279159545895, 0.00707583999633789, 0.007655424118041992, 0.0072427520751953125, 0.0071157760620117185, 0.007131135940551757, 0.007264256000518799, 0.007327744007110596, 0.007291903972625732, 0.00704204797744751, 0.0073820161819458, 0.0073175039291381834, 0.007080959796905518, 0.0070256638526916505, 0.007310336112976074, 0.007068672180175781, 0.007049215793609619, 0.007359488010406494, 0.0070266880989074704, 0.007018496036529541, 0.007278592109680176, 0.007113728046417236, 0.007501823902130127, 0.0073175039291381834, 0.007334911823272705, 0.007279615879058838, 0.0070830078125, 0.007013376235961914, 0.007484416007995606, 0.007090176105499267, 0.007300096035003662, 0.007268352031707764, 0.007077888011932373, 0.00702569580078125, 0.00705020809173584, 0.0067727680206298825, 0.006994912147521973, 0.0068577280044555666, 0.00687718391418457, 0.0069621758460998535, 0.0068392958641052244, 0.006848512172698974, 0.01468825626373291, 0.007111680030822754, 0.007364607810974121, 0.007379968166351319, 0.007293951988220215, 0.007302144050598145, 0.006940671920776367, 0.007279615879058838, 0.007326720237731933, 0.006956031799316406, 0.007027711868286133, 0.007060480117797851, 0.007076863765716553, 0.007342080116271973, 0.007336959838867187, 0.007346176147460938, 0.007097343921661377, 0.007057407855987549, 0.0070522880554199216, 0.0072837119102478025, 0.007009280204772949, 0.007290880203247071, 0.006999040126800537, 0.007349247932434082, 0.007264256000518799, 0.006982656002044678, 0.007415808200836181, 0.0070553598403930665, 0.007288832187652588, 0.0072540159225463864, 0.00703385591506958, 0.007256063938140869, 0.007058432102203369, 0.007041024208068848, 0.007236608028411865, 0.007375872135162354, 0.0073062400817871095, 0.007067647933959961, 0.007071743965148926, 0.006905856132507324, 0.006862847805023193, 0.006831103801727295, 0.006933504104614257, 0.007030784130096435, 0.007114751815795899, 0.006961184024810791, 0.007378911972045899, 0.007236608028411865, 0.0070860800743103025, 0.007071743965148926, 0.007001120090484619, 0.007321568012237549, 0.0070266880989074704, 0.007316480159759522, 0.007300096035003662, 0.007020544052124023, 
0.00733900785446167, 0.00739737606048584, 0.007048192024230957, 0.007324672222137451, 0.006982656002044678, 0.0072765440940856935, 0.007237631797790528, 0.01510707187652588, 0.006985727787017822, 0.006979584217071533, 0.007490560054779053, 0.007256063938140869, 0.007046144008636474, 0.0067348480224609375, 0.006822912216186523, 0.006837247848510742, 0.006843391895294189, 0.006853631973266602, 0.006819839954376221, 0.006825984001159668, 0.006837247848510742, 0.0069283838272094726, 0.006841343879699707, 0.006808576107025147, 0.006806528091430664, 0.006895616054534912, 0.006815743923187256, 0.006829055786132812, 0.006858751773834228, 0.00704204797744751, 0.007030784130096435, 0.007068672180175781, 0.00708403205871582, 0.006973440170288086, 0.006959104061126709, 0.006964223861694336, 0.006947840213775635, 0.0069621758460998535, 0.00695091199874878, 0.006960127830505371, 0.006967296123504638, 0.006970367908477783, 0.006953983783721924, 0.006884352207183838, 0.006816768169403077, 0.006875135898590088, 0.006854656219482422, 0.006850560188293457, 0.006810624122619629, 0.006873087882995605, 0.006870016098022461, 0.007052320003509522, 0.007278560161590576, 0.007266304016113281, 0.0069847040176391605, 0.007292960166931152, 0.007252960205078125, 0.007074816226959229, 0.00704204797744751, 0.006967296123504638, 0.007014431953430176, 0.007003104209899903, 0.007286784172058106, 0.006977568149566651, 0.00709935998916626, 0.007393280029296875, 0.0073359360694885255, 0.007111680030822754, 0.0069928960800170895, 0.0073359360694885255, 0.014533632278442383, 0.006855679988861084, 0.007506944179534912, 0.007064576148986816, 0.007284736156463623, 0.0070522880554199216, 0.007080959796905518, 0.00704204797744751, 0.006953983783721924, 0.007001088142395019, 0.007280640125274658, 0.006976511955261231, 0.006967296123504638, 0.00738099193572998, 0.007073791980743408, 0.00704204797744751, 0.007324672222137451, 0.006993919849395752, 0.007066624164581299, 0.007356416225433349, 0.007020544052124023, 0.006980607986450196, 0.0068884482383728025, 0.007078911781311035, 0.007345183849334717, 0.007293920040130616, 0.007304192066192627, 0.007058432102203369, 0.007054336071014404, 0.006940671920776367, 0.007336959838867187, 0.007268352031707764, 0.007261184215545655, 0.007383039951324463, 0.007385087966918945, 0.007312384128570557, 0.007021567821502686, 0.0069959678649902345, 0.007068672180175781, 0.006961152076721191, 0.007123968124389648, 0.007250944137573242, 0.007342080116271973, 0.007113728046417236, 0.00704204797744751, 0.007173120021820068, 0.007278592109680176, 0.007329792022705078, 0.007449600219726562, 0.00695091199874878, 0.006851583957672119, 0.006880256175994873, 0.006897664070129395, 0.006848512172698974, 0.007051263809204102, 0.0069253120422363285, 0.006867968082427979, 0.006882304191589355, 0.006858751773834228, 0.006792191982269287, 0.006841343879699707, 0.006779903888702392, 0.006895616054534912, 0.01447219181060791, 0.007172095775604248, 0.0073471999168395995, 0.007576576232910156, 0.007465983867645264, 0.007352320194244385, 0.007072768211364746, 0.007324672222137451, 0.007292928218841553, 0.007303167819976806, 0.006990848064422607, 0.007078911781311035, 0.006947840213775635, 0.0070266880989074704, 0.007362559795379638, 0.0069816322326660156, 0.007054336071014404, 0.0070348801612854, 0.007058432102203369, 0.006946815967559815, 0.006883359909057617, 0.006878176212310791, 0.006874112129211426, 0.006825984001159668, 0.007016448020935059, 0.007073791980743408, 0.007029759883880615, 0.006994944095611572, 
0.007189504146575928, 0.007060480117797851, 0.007044095993041993, 0.006945792198181152, 0.007048192024230957, 0.006937600135803222, 0.006941696166992187, 0.006807551860809326, 0.006872064113616944, 0.00687820816040039, 0.006831103801727295, 0.006799359798431396, 0.0067870721817016606, 0.006863872051239014, 0.00684441614151001, 0.006880256175994873, 0.00683622407913208, 0.0068351998329162595, 0.007450623989105225, 0.00693452787399292, 0.007296000003814697, 0.0073768959045410155, 0.006998015880584717, 0.007038976192474365, 0.007259136199951172, 0.007058432102203369, 0.007111680030822754, 0.007057407855987549, 0.007038976192474365, 0.007117824077606201, 0.007286784172058106, 0.007263232231140137, 0.007035903930664063, 0.007259136199951172, 0.00704307222366333, 0.01439027214050293, 0.00682700777053833, 0.006859776020050049, 0.007128064155578613, 0.007023615837097168, 0.0073175039291381834, 0.006998015880584717, 0.007355391979217529, 0.007331840038299561, 0.00693555212020874, 0.007288832187652588, 0.007228415966033935, 0.007097343921661377, 0.006993919849395752, 0.006975488185882568, 0.006969344139099121, 0.007475200176239013, 0.007070720195770264, 0.0070225920677185055, 0.007303167819976806, 0.006873087882995605, 0.006823935985565186, 0.00674508810043335, 0.006897664070129395, 0.0068392958641052244, 0.007434239864349365, 0.006982656002044678, 0.007323647975921631, 0.0073062400817871095, 0.007073791980743408, 0.006951935768127441, 0.007050240039825439, 0.006871039867401123, 0.006867968082427979, 0.0067717118263244626, 0.0068915200233459475, 0.006905856132507324, 0.006823935985565186, 0.006816768169403077, 0.006823935985565186, 0.006849535942077637, 0.006812672138214112, 0.006834239959716797, 0.0068269438743591305, 0.006854656219482422, 0.006822912216186523, 0.006806528091430664, 0.007372799873352051, 0.0073431038856506346, 0.007008255958557129, 0.007047167778015137, 0.007320576190948487, 0.006998015880584717, 0.007415808200836181, 0.006987775802612305, 0.007264256000518799, 0.007129087924957276, 0.00744755220413208, 0.007197696208953858, 0.007391232013702393, 0.007756800174713135, 0.00724889612197876, 0.007106560230255127, 0.015258624076843261, 0.007053311824798584, 0.00704307222366333, 0.00687718391418457, 0.0068986878395080565, 0.006837247848510742, 0.006834176063537598, 0.006780928134918213, 0.006927360057830811, 0.006812672138214112, 0.006830080032348633, 0.006882304191589355, 0.006819839954376221, 0.006924287796020508, 0.006856704235076904, 0.007327744007110596, 0.0070225920677185055, 0.007003136157989502, 0.00709222412109375, 0.007301119804382325, 0.007288832187652588, 0.0069959678649902345, 0.0072724480628967286, 0.007219200134277344, 0.00728985595703125, 0.007054336071014404, 0.007180287837982178, 0.007039999961853028, 0.007304192066192627, 0.007309343814849854, 0.007380959987640381, 0.00703385591506958, 0.006970367908477783, 0.006937600135803222, 0.007117824077606201, 0.006958079814910889, 0.007385087966918945, 0.006951935768127441, 0.007331840038299561, 0.007268352031707764, 0.0072837119102478025, 0.006937600135803222, 0.007274496078491211, 0.007081984043121338, 0.007299071788787842, 0.006993919849395752, 0.007104512214660645, 0.007288832187652588, 0.0070563840866088865, 0.007156735897064209, 0.00703385591506958, 0.007282688140869141, 0.00704307222366333, 0.0074035201072692874, 0.006968319892883301, 0.007314432144165039, 0.007324672222137451, 0.007074816226959229, 0.006970367908477783, 0.008046591758728027, 0.007085055828094483, 0.0070522880554199216, 0.007054336071014404, 
0.01510912036895752, 0.0069918718338012695, 0.007070720195770264, 0.0072724480628967286, 0.006951935768127441, 0.006856704235076904, 0.006837247848510742, 0.006831103801727295, 0.006818816184997558, 0.006773759841918945, 0.006811647891998291, 0.006907904148101806, 0.0067983360290527345, 0.0068689918518066405, 0.006816768169403077, 0.006976511955261231, 0.006865920066833496, 0.006833151817321777, 0.006848512172698974, 0.006790143966674805, 0.00694374418258667, 0.006790143966674805, 0.006804480075836182, 0.006815743923187256, 0.006832159996032715, 0.006866911888122559, 0.00683622407913208, 0.006862847805023193, 0.006796288013458252, 0.006811647891998291, 0.006863872051239014, 0.006809599876403808, 0.006937600135803222, 0.006829055786132812, 0.006810624122619629, 0.0068618240356445315, 0.00677785587310791, 0.006871039867401123, 0.006795263767242431, 0.006913023948669434, 0.0068392958641052244, 0.0069212160110473635, 0.0068249602317810056, 0.006859776020050049, 0.006917119979858399, 0.006814720153808594, 0.006841343879699707, 0.006848512172698974, 0.006887423992156983, 0.006906879901885986, 0.006881279945373535, 0.006870016098022461, 0.006802432060241699, 0.0068403840065002445, 0.006761407852172852, 0.00687820816040039, 0.006825984001159668, 0.00673689603805542, 0.006892543792724609, 0.0068884482383728025, 0.006841343879699707, 0.006825984001159668, 0.006799359798431396, 0.014756863594055175, 0.007226367950439453, 0.007236608028411865, 0.00809267234802246, 0.0074700798988342285, 0.007097343921661377, 0.007128064155578613, 0.0073431038856506346, 0.007114751815795899, 0.00722431993484497, 0.007000063896179199, 0.007351295948028564, 0.007067647933959961, 0.007016448020935059, 0.00714137601852417, 0.007134208202362061, 0.007184383869171143, 0.007386112213134765, 0.007300096035003662, 0.0072837119102478025, 0.007336959838867187, 0.007326720237731933, 0.007062560081481934, 0.007136223793029785, 0.006829055786132812, 0.006880256175994873, 0.006904863834381104, 0.006792160034179687, 0.006924287796020508, 0.006826015949249267, 0.0067604160308837894, 0.006821887969970703, 0.006882304191589355, 0.006898719787597657, 0.006868959903717041, 0.006837247848510742, 0.006811647891998291, 0.006829055786132812, 0.006916096210479736, 0.006834176063537598, 0.006846464157104492, 0.006814720153808594, 0.006897664070129395, 0.006904831886291504, 0.006899712085723877, 0.006858751773834228, 0.006920191764831543, 0.006802495956420899, 0.006924223899841308, 0.006905856132507324, 0.006837247848510742, 0.006739967823028564, 0.006881311893463134, 0.006877151966094971, 0.006774784088134766, 0.006870016098022461, 0.0068689918518066405, 0.006977536201477051, 0.0067348480224609375, 0.006831103801727295, 0.007082047939300537, 0.007328735828399658, 0.007105504035949707, 0.015265791893005372, 0.007007232189178467, 0.007094272136688232, 0.007019519805908203, 0.007029759883880615, 0.007334911823272705, 0.007013376235961914, 0.007321599960327148, 0.007360511779785156, 0.006967296123504638, 0.007035903930664063, 0.007107583999633789, 0.006987775802612305, 0.007321599960327148, 0.006990848064422607, 0.007080959796905518, 0.007327744007110596, 0.006852608203887939, 0.006818816184997558, 0.006819839954376221, 0.006850560188293457, 0.006814720153808594, 0.006842368125915528, 0.006866943836212158, 0.006765567779541016, 0.006838272094726563, 0.006903808116912841, 0.006816768169403077, 0.0068351998329162595, 0.006870016098022461, 0.006903808116912841, 0.0067420158386230465, 0.00682700777053833, 0.006851583957672119, 0.00683622407913208, 
0.006849535942077637, 0.00695091199874878, 0.006930431842803955, 0.006820864200592041, 0.006825984001159668, 0.006842368125915528, 0.006903840065002441, 0.006749152183532715, 0.006801407814025879, 0.0068321280479431154, 0.006884352207183838, 0.0067983360290527345, 0.006927360057830811, 0.0068659520149230956, 0.006851552009582519, 0.006815743923187256, 0.006879231929779053, 0.006770688056945801, 0.006820864200592041, 0.0068351998329162595, 0.006905856132507324, 0.006778880119323731, 0.006809599876403808, 0.0069324798583984375, 0.006813695907592773, 0.0068884482383728025, 0.006900735855102539, 0.006858751773834228, 0.014593024253845215, 0.006931519985198974, 0.006945727825164795, 0.0068577280044555666, 0.006807551860809326, 0.00687820816040039, 0.006846464157104492, 0.0068321280479431154, 0.006821887969970703, 0.006852608203887939, 0.006850560188293457, 0.00689356803894043, 0.006864895820617676, 0.006930431842803955, 0.006833151817321777, 0.006841343879699707, 0.006866943836212158, 0.006849535942077637, 0.0072499198913574215, 0.007202816009521484, 0.006822912216186523, 0.006814720153808594, 0.006889472007751465, 0.006825984001159668, 0.006730751991271973, 0.006906879901885986, 0.00687820816040039, 0.006924287796020508, 0.006841407775878906, 0.006832064151763916, 0.006874112129211426, 0.006864895820617676, 0.006862847805023193, 0.006816768169403077, 0.006825984001159668, 0.006821887969970703, 0.006904831886291504, 0.006820864200592041, 0.006754303932189941, 0.006837247848510742, 0.006806528091430664, 0.0069632000923156735, 0.006896639823913574, 0.006838272094726563, 0.0068690237998962405, 0.006818784236907959, 0.006925343990325928, 0.006814688205718994, 0.006924287796020508, 0.006721536159515381, 0.006793216228485107, 0.006865920066833496, 0.006818880081176757, 0.0068269438743591305, 0.0068884482383728025, 0.007058432102203369, 0.008130559921264649, 0.007341055870056152, 0.007233535766601563, 0.00708403205871582, 0.00703385591506958, 0.0070256638526916505, 0.006979584217071533, 0.01458892822265625, 0.006846464157104492, 0.006864895820617676, 0.006960127830505371, 0.006848512172698974, 0.00684441614151001, 0.006829055786132812, 0.006831103801727295, 0.006892543792724609, 0.0068618240356445315, 0.006882304191589355, 0.006905856132507324, 0.006882304191589355, 0.006827040195465088, 0.006789087772369385, 0.006854656219482422, 0.0068249602317810056, 0.0068280320167541505, 0.006865920066833496, 0.006848512172698974, 0.006830080032348633, 0.00687820816040039, 0.0069253120422363285, 0.006853631973266602, 0.006829055786132812, 0.00682700777053833, 0.006895616054534912, 0.006739967823028564, 0.006937600135803222, 0.006820864200592041, 0.006876160144805908, 0.006912000179290771, 0.006804512023925781, 0.006790143966674805, 0.00677785587310791, 0.00704099178314209, 0.007131135940551757, 0.007195648193359375, 0.00703385591506958, 0.007274496078491211, 0.007260159969329834, 0.007279615879058838, 0.007324672222137451, 0.006994944095611572, 0.006920191764831543, 0.006999040126800537, 0.007361536026000977, 0.0070553598403930665, 0.007038976192474365, 0.006946815967559815, 0.007103487968444824, 0.006858751773834228, 0.006846464157104492, 0.006766592025756836, 0.0068689918518066405, 0.006909952163696289, 0.006982656002044678, 0.006907904148101806, 0.006811647891998291, 0.00682700777053833, 0.0067870721817016606, 0.006804480075836182, 0.006803455829620361, 0.014484479904174804, 0.00684441614151001, 0.0068618240356445315, 0.006823935985565186, 0.006817791938781738, 0.0067573761940002445, 0.0068076162338256835, 
0.00683513593673706, 0.00682700777053833, 0.006851583957672119, 0.0067758078575134275, 0.0068351998329162595, 0.0068392958641052244, 0.0068618240356445315, 0.006919167995452881, 0.006856704235076904, 0.006812672138214112, 0.0068055038452148435, 0.006791168212890625, 0.006724607944488525, 0.006815743923187256, 0.00690176010131836, 0.006831103801727295, 0.006850560188293457, 0.0068392958641052244, 0.006795263767242431, 0.0067338237762451176, 0.006799359798431396, 0.006945792198181152, 0.006814720153808594, 0.0068853759765625, 0.006910975933074951, 0.006845471858978272, 0.006775775909423828, 0.006874112129211426, 0.006841343879699707, 0.0068689918518066405, 0.00678604793548584, 0.006905856132507324, 0.006766592025756836, 0.006804480075836182, 0.006808576107025147, 0.006880256175994873, 0.006937600135803222, 0.006846464157104492, 0.006829055786132812, 0.006797311782836914, 0.006800384044647217, 0.0068986878395080565, 0.006799359798431396, 0.006816768169403077, 0.006882304191589355, 0.00695091199874878, 0.006711296081542969, 0.00687718391418457, 0.006760447978973389, 0.006851583957672119, 0.006994944095611572, 0.007008255958557129, 0.007278592109680176, 0.007364607810974121, 0.007021567821502686, 0.007110655784606934, 0.014486528396606446, 0.0067983360290527345, 0.0067758078575134275, 0.006808576107025147, 0.006819839954376221, 0.006845439910888672, 0.006817791938781738, 0.006862847805023193, 0.006850560188293457, 0.006914048194885254, 0.006873087882995605, 0.006830080032348633, 0.006825984001159668, 0.006924287796020508, 0.0068055038452148435, 0.006867968082427979, 0.0068986878395080565, 0.006773759841918945, 0.006887423992156983, 0.006834176063537598, 0.006807551860809326, 0.006825984001159668, 0.006724607944488525, 0.006808576107025147, 0.006781951904296875, 0.006873087882995605, 0.006897664070129395, 0.006820864200592041, 0.006953983783721924, 0.006814720153808594, 0.0068055038452148435, 0.006864895820617676, 0.007054336071014404, 0.007000063896179199, 0.006851583957672119, 0.00693452787399292, 0.006875135898590088, 0.006767615795135498, 0.006912000179290771, 0.006930431842803955, 0.007072768211364746, 0.007323647975921631, 0.007137343883514404, 0.007246784210205078, 0.007519231796264648, 0.007401472091674805, 0.007305215835571289, 0.007124991893768311, 0.007411712169647216, 0.007051263809204102, 0.00744652795791626, 0.007027711868286133, 0.007122943878173828, 0.006968319892883301, 0.007057407855987549, 0.006961152076721191, 0.007019519805908203, 0.006977536201477051, 0.007052351951599121, 0.00700819206237793, 0.006982656002044678, 0.007037951946258545, 0.007131135940551757, 0.014568448066711426, 0.00683622407913208, 0.006825984001159668, 0.00679423999786377, 0.006896639823913574, 0.006814720153808594, 0.00689356803894043, 0.006833151817321777, 0.006890495777130127, 0.0068577280044555666, 0.006875135898590088, 0.006852608203887939, 0.00677785587310791, 0.006897664070129395, 0.006802432060241699, 0.006900735855102539, 0.006795263767242431, 0.006851583957672119, 0.006871039867401123, 0.0067758078575134275, 0.0068249602317810056, 0.006874112129211426, 0.0068280320167541505, 0.006838272094726563, 0.0067123198509216305, 0.006875135898590088, 0.006866943836212158, 0.006851615905761718, 0.006802400112152099, 0.0067983360290527345, 0.006980607986450196, 0.0067338237762451176, 0.006904831886291504, 0.006790143966674805, 0.006918144226074219, 0.006833151817321777, 0.006924287796020508, 0.007085055828094483, 0.007234560012817383, 0.007293951988220215, 0.006968319892883301, 0.0069202561378479, 
0.007036863803863525, 0.007241727828979493, 0.00728166389465332, 0.007208960056304932, 0.00697657585144043, 0.00704095983505249, 0.006946815967559815, 0.00724889612197876, 0.007309311866760254, 0.007264256000518799, 0.007353343963623047, 0.006953983783721924, 0.007078911781311035, 0.00727347183227539, 0.007308288097381592, 0.007346176147460938, 0.007018496036529541, 0.007307263851165771, 0.007106560230255127, 0.007044095993041993, 0.007268352031707764, 0.0158341121673584, 0.0071833600997924804, 0.007159808158874512, 0.007171072006225586, 0.007048192024230957, 0.007072768211364746, 0.007048192024230957, 0.007138304233551026, 0.007122943878173828, 0.007095295906066895, 0.007088128089904785, 0.007096320152282715, 0.007090176105499267, 0.0070553598403930665, 0.006953983783721924, 0.007385087966918945, 0.007057407855987549, 0.0069928960800170895, 0.0069621758460998535, 0.006957056045532227, 0.0069632000923156735, 0.0069632000923156735, 0.006944767951965332, 0.006987775802612305, 0.006967296123504638, 0.0069959678649902345, 0.006947840213775635, 0.006946815967559815, 0.006946815967559815, 0.006977536201477051, 0.006964223861694336, 0.007103487968444824, 0.006945792198181152, 0.007001088142395019, 0.006977536201477051, 0.0069847040176391605, 0.006988800048828125, 0.006998015880584717, 0.006968319892883301, 0.006985727787017822, 0.006965248107910156, 0.0070266880989074704, 0.0069959678649902345, 0.006982656002044678, 0.007024640083312988, 0.007001088142395019, 0.006975488185882568, 0.006843391895294189, 0.006909952163696289, 0.006802495956420899, 0.00689247989654541, 0.006854688167572022, 0.0068873920440673825, 0.006812672138214112, 0.006874112129211426, 0.006903808116912841, 0.006825984001159668, 0.006889472007751465, 0.0068618240356445315, 0.006879231929779053, 0.006908927917480469, 0.006876160144805908, 0.006922239780426025, 0.014621696472167968, 0.006899712085723877, 0.00693555212020874, 0.006847519874572754, 0.006882271766662598, 0.00687820816040039, 0.006944767951965332, 0.006766592025756836, 0.006940671920776367, 0.006897664070129395, 0.006865920066833496, 0.006841407775878906, 0.006919104099273682, 0.00689356803894043, 0.006847487926483154, 0.006870016098022461, 0.00690176010131836, 0.006906879901885986, 0.006912000179290771, 0.006965248107910156, 0.006863872051239014, 0.006830080032348633, 0.006730751991271973, 0.00690176010131836, 0.006927360057830811, 0.006810624122619629, 0.007012351989746094, 0.00689356803894043, 0.00691919994354248, 0.006855648040771484, 0.006883327960968018, 0.0069253120422363285, 0.006875135898590088, 0.006892543792724609, 0.006822912216186523, 0.006840320110321045, 0.006876160144805908, 0.006833151817321777, 0.006819839954376221, 0.006812672138214112, 0.006814720153808594, 0.006903808116912841, 0.006895616054534912, 0.0068321280479431154, 0.006881279945373535, 0.006841343879699707, 0.006731776237487793, 0.006875135898590088, 0.0069918718338012695, 0.006924287796020508, 0.0068351998329162595, 0.006939648151397705, 0.00687718391418457, 0.006896639823913574, 0.006813695907592773, 0.006858751773834228, 0.006916096210479736, 0.0068884482383728025, 0.0069027838706970214, 0.006879231929779053, 0.00689356803894043, 0.006807551860809326, 0.006790143966674805, 0.0144650239944458, 0.006765567779541016, 0.006876160144805908, 0.00690176010131836, 0.006822912216186523, 0.006920191764831543, 0.006781951904296875, 0.006908927917480469, 0.006758399963378906, 0.0068884482383728025, 0.006837247848510742, 0.006825984001159668, 0.006895616054534912, 0.006811647891998291, 
0.006904831886291504, 0.006757440090179444, 0.006893504142761231, 0.006813695907592773, 0.006830080032348633, 0.006797311782836914, 0.0068853759765625, 0.00674508810043335, 0.00678604793548584, 0.006873087882995605, 0.006900735855102539, 0.006816768169403077, 0.006854656219482422, 0.00682700777053833, 0.006744063854217529, 0.006923264026641846, 0.006816768169403077, 0.006825984001159668, 0.006808576107025147, 0.006788095951080322, 0.006864895820617676, 0.0067983360290527345, 0.007007232189178467, 0.006815743923187256, 0.006808576107025147, 0.006830080032348633, 0.006899712085723877, 0.006894591808319092, 0.006729728221893311, 0.006913023948669434, 0.006912000179290771, 0.007111680030822754, 0.006854656219482422, 0.006816768169403077, 0.006829055786132812, 0.006802432060241699, 0.0068986878395080565, 0.006889472007751465, 0.0068392958641052244, 0.0067358717918396, 0.006773759841918945, 0.006892543792724609, 0.0067686400413513184, 0.006929408073425293, 0.006807551860809326, 0.006790143966674805, 0.006814720153808594, 0.006875135898590088, 0.006815743923187256, 0.014535679817199706, 0.006904863834381104, 0.006907872200012207, 0.006808576107025147, 0.006803455829620361, 0.006793216228485107, 0.00690176010131836, 0.006812672138214112, 0.006782976150512696, 0.006817791938781738, 0.006718463897705078, 0.006838304042816162, 0.006800352096557617, 0.006853631973266602, 0.006856704235076904, 0.006875135898590088, 0.006808576107025147, 0.006815743923187256, 0.006871039867401123, 0.006882304191589355, 0.0067123198509216305, 0.00693555212020874, 0.0069928960800170895, 0.0067983360290527345, 0.006738944053649902, 0.006795263767242431, 0.006780928134918213, 0.0068915200233459475, 0.006890495777130127, 0.006781951904296875, 0.006867968082427979, 0.00683622407913208, 0.006870016098022461, 0.0068055038452148435, 0.006714367866516113, 0.006806528091430664, 0.006781951904296875, 0.006781951904296875, 0.006825984001159668, 0.006850560188293457, 0.006818816184997558, 0.006812672138214112, 0.006882368087768554, 0.006911935806274414, 0.006807551860809326, 0.00689356803894043, 0.006797311782836914, 0.006874176025390625, 0.00681056022644043, 0.006914048194885254, 0.006818816184997558, 0.006825984001159668, 0.00682700777053833, 0.006793216228485107, 0.006847487926483154, 0.0068351998329162595, 0.0068055038452148435, 0.006850560188293457, 0.006801407814025879, 0.006872064113616944, 0.0068986878395080565, 0.0068689918518066405, 0.006813695907592773, 0.014460927963256836, 0.006829055786132812, 0.006843391895294189, 0.00684441614151001, 0.006938623905181885, 0.006749184131622315, 0.0068392958641052244, 0.00688640022277832, 0.006897664070129395, 0.00684441614151001, 0.006819839954376221, 0.0068280320167541505, 0.006834176063537598, 0.006892543792724609, 0.00687718391418457, 0.006845439910888672, 0.0069632000923156735, 0.006806528091430664, 0.0068915200233459475, 0.0069283838272094726, 0.00688640022277832, 0.006917119979858399, 0.006831103801727295, 0.006825984001159668, 0.0069918718338012695, 0.007312384128570557, 0.007332863807678222, 0.007145503997802735, 0.0069437122344970705, 0.00729702377319336, 0.00725708818435669, 0.006986752033233643, 0.007379968166351319, 0.0073359360694885255, 0.007027711868286133, 0.007037951946258545, 0.007255040168762207, 0.007329792022705078, 0.006730751991271973, 0.007303167819976806, 0.007400447845458984, 0.006849535942077637, 0.006818816184997558, 0.0067983360290527345, 0.006873087882995605, 0.006714367866516113, 0.006894591808319092, 0.0069027838706970214, 0.006818816184997558, 
0.006830080032348633, 0.006841343879699707, 0.006900735855102539, 0.0067348480224609375, 0.006866943836212158, 0.006940671920776367, 0.00672870397567749, 0.006923264026641846, 0.0068055038452148435, 0.006945856094360351, 0.00685152006149292, 0.006808576107025147, 0.006906879901885986, 0.0068280320167541505]",tokens/s,140.10129046771218,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,togethercomputer/RedPajama-INCITE-Base-7B-v0.1,togethercomputer/RedPajama-INCITE-Base-7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File 
""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 327, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-30b,huggyllama/llama-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 416, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,togethercomputer/RedPajama-INCITE-Base-3B-v1,togethercomputer/RedPajama-INCITE-Base-3B-v1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in 
_call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 327, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,mistralai/Mixtral-8x7B-v0.1,mistralai/Mixtral-8x7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in 
from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,r,r,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/r/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a305f-595547147594e39172f64f02;1426d1c8-950f-4489-a9d4-b893a21732fc) Repository Not Found for url: https://huggingface.co/r/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: r is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-12b,EleutherAI/pythia-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = 
self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 327, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-30b,facebook/opt-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in 
_wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 1117, in forward outputs = self.model.decoder( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 883, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 524, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 291, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-564M,facebook/xglm-564M,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: XGLMForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmp75hlaflf/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,TencentARC/Mistral_Pro_8B_v0.1,TencentARC/Mistral_Pro_8B_v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 1139, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 1024, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in 
_wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 738, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 344, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-base-alpha-7b,stabilityai/stablelm-base-alpha-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 
115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 327, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-6B,01-ai/Yi-6B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1914, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2651, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1174, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 978, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 718, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 414, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-14B,Qwen/Qwen1.5-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 344, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm2-20b,internlm/internlm2-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/095ad5d22b0db76d20dbaefe7fa6ec7eb5da8b28/modeling_internlm2.py"", line 1204, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/095ad5d22b0db76d20dbaefe7fa6ec7eb5da8b28/modeling_internlm2.py"", line 1004, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/095ad5d22b0db76d20dbaefe7fa6ec7eb5da8b28/modeling_internlm2.py"", line 738, in forward hidden_states, self_attn_weights, present_key_value = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/095ad5d22b0db76d20dbaefe7fa6ec7eb5da8b28/modeling_internlm2.py"", line 410, in forward qkv_states = self.wqkv(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,.,.,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 370, in cached_file raise EnvironmentError( OSError: . does not appear to have a file named config.json. Checkout 'https://huggingface.co/./tree/None' for available files. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-j-6b,EleutherAI/gpt-j-6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gptj/modeling_gptj.py"", line 1124, in forward transformer_outputs = 
self.transformer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gptj/modeling_gptj.py"", line 950, in forward outputs = block( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gptj/modeling_gptj.py"", line 578, in forward attn_outputs = self.attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gptj/modeling_gptj.py"", line 224, in forward query = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-7.5B,facebook/xglm-7.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/xglm/modeling_xglm.py"", line 761, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/xglm/modeling_xglm.py"", line 647, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/xglm/modeling_xglm.py"", line 414, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/xglm/modeling_xglm.py"", line 244, in forward query_states = self.q_proj(hidden_states) * self.scaling File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-7b,tiiuae/falcon-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3640, in from_pretrained hf_quantizer.preprocess_model( File 
""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 182, in preprocess_model return self._process_model_before_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 85, in _process_model_before_weight_loading model, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( [Previous line repeated 1 more time] File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 165, in replace_with_awq_linear model._modules[name] = target_cls( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 45, in __init__ assert self.in_features % self.group_size == 0 AssertionError ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/polyglot-ko-12.8b,EleutherAI/polyglot-ko-12.8b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File 
""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 172, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciCoder-1b,Deci/DeciCoder-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1914, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2651, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1174, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 978, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 718, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) TypeError: DeciCoderAttention.forward() got an unexpected keyword argument 'cache_position' ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,m,m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/m/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = 
_request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2b74-3a78782862990d623f2dd3e5;8b048c88-95bd-4b68-bcfc-4976db9bbf11) Repository Not Found for url: https://huggingface.co/m/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: m is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-1.8B,Qwen/Qwen1.5-1.8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = 
Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 249, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-MoE-A2.7B,Qwen/Qwen1.5-MoE-A2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3640, in from_pretrained hf_quantizer.preprocess_model( File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 182, in preprocess_model return self._process_model_before_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 85, in _process_model_before_weight_loading model, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( [Previous line repeated 1 more time] File 
""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 165, in replace_with_awq_linear model._modules[name] = target_cls( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 46, in __init__ assert out_features % (32 // self.w_bit) == 0 AssertionError ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,v,v,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/v/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in 
_request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3394-0d045b3c54c88bc06323fe83;efd30934-412d-471d-a02e-b593fe1e75e7) Repository Not Found for url: https://huggingface.co/v/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: v is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-110B,Qwen/Qwen1.5-110B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in 
launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. 
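The Qwen/Qwen1.5-110B row here (and the meta-llama/Llama-2-70b-hf row further down) fails earlier than the kernel assertion: `dispatch_model` raises `torch.cuda.OutOfMemoryError` while moving weights onto the single 24 GB A10G. The outcome is predictable from the weight footprint alone; the sketch below is rough arithmetic only (it ignores KV cache, activations, CUDA context, and quantization metadata), and note that `config.environment.gpu_vram_mb` in these rows appears to hold bytes:

```python
# Rough single-GPU feasibility check for quantized weights (a sketch that
# ignores KV cache, activations, CUDA context, and quantization metadata).
def weights_fit(n_params: float, bits: int, vram_bytes: int, headroom: float = 0.9) -> bool:
    return n_params * bits / 8 < vram_bytes * headroom

A10G_VRAM = 24_146_608_128  # bytes, from these rows' environment column

print(weights_fit(111e9, 4, A10G_VRAM))  # Qwen1.5-110B: ~55.5 GB of weights -> False
print(weights_fit(7e9, 4, A10G_VRAM))    # falcon-7b:    ~3.5 GB of weights  -> True
```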
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-2-12b,stabilityai/stablelm-2-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 1158, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 1035, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 759, in forward self_attn_output, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 317, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.3b,EleutherAI/pythia-1.3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 172, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-7b-hf,meta-llama/Llama-2-7b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 327, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciLM-7B,Deci/DeciLM-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1914, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2651, 
in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/Deci/DeciLM-7B/c3c9f4226801dc0433f32aebffe0aac68ee2f051/transformers_v4_35_2__modeling_llama.py"", line 1034, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/Deci/DeciLM-7B/c3c9f4226801dc0433f32aebffe0aac68ee2f051/modeling_decilm.py"", line 274, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/Deci/DeciLM-7B/c3c9f4226801dc0433f32aebffe0aac68ee2f051/transformers_v4_35_2__modeling_llama.py"", line 672, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/Deci/DeciLM-7B/c3c9f4226801dc0433f32aebffe0aac68ee2f051/modeling_decilm.py"", line 84, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
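The Deci/DeciCoder-1b row fails for an unrelated reason: `TypeError: DeciCoderAttention.forward() got an unexpected keyword argument 'cache_position'`. Its `trust_remote_code` modeling file is pinned to a transformers 4.35-era API, while the 4.42.1 generation loop used in that row forwards `cache_position` into attention layers. A signature probe can flag such stale remote code before a run; this is an illustrative sketch, not part of optimum-benchmark:

```python
import inspect

# Flag attention modules whose forward() accepts neither `cache_position`
# nor **kwargs (a sketch; `model` is any loaded trust_remote_code model).
def stale_attention_modules(model):
    stale = []
    for name, module in model.named_modules():
        if not type(module).__name__.endswith("Attention"):
            continue
        params = inspect.signature(module.forward).parameters
        takes_kwargs = any(p.kind is inspect.Parameter.VAR_KEYWORD
                           for p in params.values())
        if "cache_position" not in params and not takes_kwargs:
            stale.append(name)
    return stale
```

A non-empty result predicts exactly this TypeError under the transformers version these rows ran, without spending a full generate() call to find out.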
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-0.5B,Qwen/Qwen1.5-0.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 249, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,0,0,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/0/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in 
_hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a33eb-605fd85401e1d15a0059693f;56c70422-a9e4-4013-989e-7bd42a97a94b) Repository Not Found for url: https://huggingface.co/0/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 0 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
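Four rows in this slice benchmark single-character model ids (`m`, `v`, `0`, `2`) and die in `AutoConfig.from_pretrained` with `RepositoryNotFoundError`; they look like artifacts of the sweep's model list rather than real candidates. An existence check before spawning the child process is cheap; `repo_exists` is part of `huggingface_hub`, though whether it is available in the exact version pinned here is an assumption:

```python
from huggingface_hub import repo_exists

# Drop model ids that don't resolve on the Hub, instead of burning one
# benchmark process per 404 (a sketch; candidates mirror rows in this slice).
candidates = ["tiiuae/falcon-7b", "m", "v", "0", "2", "Qwen/Qwen1.5-0.5B"]
models = [m for m in candidates if repo_exists(m)]
print(models)  # expected: ['tiiuae/falcon-7b', 'Qwen/Qwen1.5-0.5B']
```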
4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-70b-hf,meta-llama/Llama-2-70b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,2,2,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/2/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a3292-7194fcc56c929236525d87d6;4a1e57b1-396e-4f98-8780-a2b94591c8b7) Repository Not Found for url: https://huggingface.co/2/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 2 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-2.7b,facebook/opt-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in 
run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 1117, in forward outputs = self.model.decoder( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 883, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 524, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 154, in forward query_states = self.q_proj(hidden_states) * self.scaling File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,l,l,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/l/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e 
huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a30e1-7567f5d779d9a6c5274e0fcf;6bfab583-c506-4a6f-b9a2-8ce9aac18684) Repository Not Found for url: https://huggingface.co/l/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: l is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Meta-Llama-3-70B,meta-llama/Meta-Llama-3-70B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target 
report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-32B,Qwen/Qwen1.5-32B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in 
forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 249, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-72B,Qwen/Qwen2-beta-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) 
File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,s,s,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/s/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in 
_get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2c19-5c0f07ff1b1c68273d59e2cf;2e693fda-d77e-42b6-8d22-c38d41635301) Repository Not Found for url: https://huggingface.co/s/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: s is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,B,B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/B/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a32e7-097022e23f6249a352dec33e;5b6d6e9f-fe6e-47a7-b3fb-b3e51a511d32) Repository Not Found for url: https://huggingface.co/B/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: B is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-14B,Qwen/Qwen2-beta-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 
68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 249, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,mistralai/Mistral-7B-v0.1,mistralai/Mistral-7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 1139, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 1024, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 738, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 251, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-16B-nl,Salesforce/codegen-16B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/codegen/modeling_codegen.py"", line 667, in forward transformer_outputs = self.transformer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/codegen/modeling_codegen.py"", line 536, in forward outputs = block( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/codegen/modeling_codegen.py"", line 272, in forward attn_outputs = self.attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/codegen/modeling_codegen.py"", line 163, in forward qkv = self.qkv_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-2.7b,EleutherAI/pythia-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 172, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,a,a,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/a/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in 
_hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3081-75f0f20923280c3836d6bbee;f43b9674-7fac-4081-ad24-a96a08fd86eb) Repository Not Found for url: https://huggingface.co/a/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: a is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-2.7B,EleutherAI/gpt-neo-2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 971, in forward transformer_outputs = self.transformer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 839, in forward outputs = block( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 566, in forward attn_outputs = self.attn( File 
""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 518, in forward return self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 260, in forward query = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-40b,tiiuae/falcon-40b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context 
return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-40b/4a70170c215b36a3cce4b4253f6d0612bb7d4146/modeling_falcon.py"", line 900, in forward transformer_outputs = self.transformer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-40b/4a70170c215b36a3cce4b4253f6d0612bb7d4146/modeling_falcon.py"", line 797, in forward outputs = block( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-40b/4a70170c215b36a3cce4b4253f6d0612bb7d4146/modeling_falcon.py"", line 453, in forward attn_outputs = self.self_attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-40b/4a70170c215b36a3cce4b4253f6d0612bb7d4146/modeling_falcon.py"", line 291, in forward fused_qkv = self.query_key_value(hidden_states) # [batch_size, seq_length, 3 x hidden_size] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,1,1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/1/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e 
huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3499-6aabf7c4337b574750694b92;584ec456-e906-40ea-8a2e-eba9a7a9d864) Repository Not Found for url: https://huggingface.co/1/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 1 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-base-alpha-3b,stabilityai/stablelm-base-alpha-3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, 
in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 172, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,t,t,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/t/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e 
huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2fc5-4fa45f5c2e2bb7c54c4588fa;0aaea998-7bb1-4540-a99a-8e8263bfeb62) Repository Not Found for url: https://huggingface.co/t/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: t is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neox-20b,EleutherAI/gpt-neox-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = 
worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 172, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,/,/,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 370, in cached_file raise EnvironmentError( OSError: / does not appear to have a file named config.json. Checkout 'https://huggingface.co///tree/None' for available files. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-2b,google/recurrentgemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc38a-515c8dcb0e5d8e2e26714a05;114107fc-2a33-4791-a46a-93b2b5784bd1) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/recurrentgemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-72B,Qwen/Qwen1.5-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,-,-,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 106, in _inner_fn validate_repo_id(arg_value) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 160, in validate_repo_id raise HFValidationError( huggingface_hub.errors.HFValidationError: Repo id must use alphanumeric chars or '-', '_', '.', '--' and '..' are forbidden, '-' and '.' cannot start or end the name, max length is 96: '-'. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 463, in cached_file raise EnvironmentError( OSError: Incorrect path_or_model_id: '-'. Please provide either the path to a local folder or the repo_id of a model on the Hub. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-6B-nl,Salesforce/codegen-6B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/codegen/modeling_codegen.py"", line 667, in forward transformer_outputs = self.transformer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/codegen/modeling_codegen.py"", line 536, in forward outputs = block( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/codegen/modeling_codegen.py"", line 272, in forward attn_outputs = self.attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/codegen/modeling_codegen.py"", line 163, in forward qkv = self.qkv_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-7b,google/recurrentgemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", 
line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667cc3e0-75240565328388293edffc07;faaddfae-52b6-4515-a373-b5789718e9fe) Repository Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: google/recurrentgemma-7b is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-13b,huggyllama/llama-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = 
self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 327, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-2b,google/gemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc2e2-51a109d2525787dc0fce9826;05b57235-5362-4455-8ffd-d7f4f8c20c27) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/gemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-7B,Qwen/Qwen-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-13b-hf,meta-llama/Llama-2-13b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 327, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-4B,Qwen/Qwen1.5-4B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, 
in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 249, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-410m,EleutherAI/pythia-410m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 172, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-7b,huggyllama/llama-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in 
_sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 327, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-125m,EleutherAI/gpt-neo-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 971, in forward transformer_outputs = self.transformer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 839, in forward outputs = block( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 566, in forward attn_outputs = self.attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 518, in forward return self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 260, in forward query = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,microsoft/phi-1_5,microsoft/phi-1_5,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return 
self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/phi/modeling_phi.py"", line 1166, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/phi/modeling_phi.py"", line 1045, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/phi/modeling_phi.py"", line 776, in forward attn_outputs, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/phi/modeling_phi.py"", line 314, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-14B,Qwen/Qwen-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-65b,huggyllama/llama-65b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 86.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-7b,google/gemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc334-3e1986ee743033a90af295be;e29c43b5-1dc2-4d68-9eb3-3e26cbb9c877) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/gemma-7b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-7b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,i,i,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/i/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a2f17-05a2dac8057de8a932009e2b;5a6f2903-1532-406c-a1ed-41592d49ab63) Repository Not Found for url: https://huggingface.co/i/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: i is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-6.7b,facebook/opt-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in 
run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 1117, in forward outputs = self.model.decoder( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 883, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 524, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 154, in forward query_states = self.q_proj(hidden_states) * self.scaling File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-34B,01-ai/Yi-34B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1914, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2651, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1174, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 978, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 718, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 326, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-160m,EleutherAI/pythia-160m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 172, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,M,M,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/M/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e 
huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2ec2-7943e57209fd451d2a87345f;76ae8299-d009-4e97-9e73-66d828c67d21) Repository Not Found for url: https://huggingface.co/M/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: M is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,openai-community/gpt2-large,openai-community/gpt2-large,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.1,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1255.817216,2645.03296,0.0,1998.585856,1692.285952,s,10,0.2421620788574219,0.02421620788574219,0.0007151447258735442,0.02405540752410889,0.025104490089416506,0.025451717281341553,0.02572949903488159,"[0.0257989444732666, 0.02358336067199707, 0.023982303619384766, 0.023516639709472657, 0.023505535125732422, 0.02412851142883301, 0.023632959365844728, 0.024505855560302735, 0.024480640411376955, 
0.025027328491210938]",tokens/s,10571.432208043005,kWh,2.7753140546097755e-07,1.5207407634566343e-07,8.297378834037525e-07,1.2593433652103935e-06,tokens/kWh,203280540.5356871,MB,1257.193472,2645.03296,0.0,1998.585856,1740.091904,s,10,13.934907348632812,1.3934907348632812,0.025773039375764546,1.3917061157226562,1.4230255249023438,1.4235073303222656,1.423892774658203,"[1.4067874755859375, 1.374333740234375, 1.3706243896484376, 1.3586650390625, 1.362622314453125, 1.376624755859375, 1.4168704833984376, 1.42291845703125, 1.4239891357421874, 1.4214715576171875]",tokens/s,45.21020371633909,kWh,1.628260937647143e-05,8.921926230863981e-06,3.281937236919722e-05,5.802390797653265e-05,tokens/kWh,1085759.339503294,,s,629,14.127670267105099,0.02246052506693975,0.0029503195630523993,0.022187007904052734,0.02265027198791504,0.02293166084289551,0.04536406036376953,"[0.022769664764404295, 0.022405120849609376, 0.022578176498413087, 0.023195648193359376, 0.023209983825683594, 0.022948863983154297, 0.022355968475341798, 0.022914047241210937, 0.023353343963623048, 0.023217151641845703, 0.023201791763305665, 0.022947839736938477, 0.023254016876220703, 0.023146495819091797, 0.023326719284057617, 0.023176191329956054, 0.022323200225830078, 0.022245376586914063, 0.02193715286254883, 0.02272153663635254, 0.023012351989746094, 0.023163904190063478, 0.02294272041320801, 0.02234060859680176, 0.02215936088562012, 0.02208768081665039, 0.022123519897460937, 0.02208460807800293, 0.022091775894165038, 0.02206617546081543, 0.022107135772705077, 0.02208358383178711, 0.022021120071411132, 0.022038528442382813, 0.02208051109313965, 0.0220150089263916, 0.02190025520324707, 0.022139904022216796, 0.022179840087890625, 0.02205904006958008, 0.02192790412902832, 0.022039552688598633, 0.022223871231079103, 0.02220953559875488, 0.02188390350341797, 0.02201190376281738, 0.022675455093383787, 0.02223411178588867, 0.02206105613708496, 0.022090751647949217, 0.021857280731201172, 0.021324800491333007, 0.021366783142089844, 0.021997568130493163, 0.022114303588867186, 0.02204569625854492, 0.022026239395141603, 0.02142207908630371, 0.021380096435546874, 0.021425151824951173, 0.021315584182739256, 0.0213309440612793, 0.04596223831176758, 0.021974016189575195, 0.022114303588867186, 0.022154239654541014, 0.02209280014038086, 0.02191974449157715, 0.02253824043273926, 0.021955583572387697, 0.022146047592163084, 0.022205440521240235, 0.02226585578918457, 0.02211737632751465, 0.02268262481689453, 0.02250649642944336, 0.021916671752929686, 0.02152448081970215, 0.021397504806518555, 0.021328895568847657, 0.021774335861206053, 0.021780479431152345, 0.021586944580078125, 0.021465087890625, 0.022072320938110353, 0.021960704803466798, 0.022072320938110353, 0.022037504196166992, 0.022006784439086914, 0.022033407211303712, 0.021959680557250977, 0.022055936813354493, 0.022128639221191407, 0.022040576934814454, 0.0220948486328125, 0.022106111526489256, 0.02224742317199707, 0.02228531265258789, 0.02211327934265137, 0.021979135513305666, 0.022032384872436524, 0.0219289608001709, 0.021993471145629884, 0.02209791946411133, 0.022039552688598633, 0.0212992000579834, 0.021410816192626952, 0.021403648376464843, 0.02163609504699707, 0.02124595260620117, 0.021142528533935546, 0.021389312744140625, 0.021386240005493166, 0.0214466552734375, 0.021399551391601563, 0.021315584182739256, 0.021326847076416015, 0.021357568740844726, 0.021409791946411134, 0.021377023696899415, 0.021366783142089844, 0.021364736557006835, 0.021374975204467773, 0.021353471755981446, 
0.021223424911499023, 0.04640563201904297, 0.02209791946411133, 0.022133760452270508, 0.02215936088562012, 0.022190080642700196, 0.02220953559875488, 0.022187007904052734, 0.022112255096435548, 0.022176767349243166, 0.022145023345947267, 0.022112255096435548, 0.022082559585571288, 0.022288383483886717, 0.02206211280822754, 0.022308832168579103, 0.02231091117858887, 0.02206924819946289, 0.022254592895507814, 0.02222489547729492, 0.022042623519897463, 0.0214835205078125, 0.021370880126953123, 0.021332992553710937, 0.021533695220947266, 0.021412864685058593, 0.02131455993652344, 0.021326847076416015, 0.021283840179443358, 0.021344255447387696, 0.02128895950317383, 0.0213309440612793, 0.021343231201171875, 0.02143027114868164, 0.021319679260253906, 0.021373952865600586, 0.0214783992767334, 0.021329919815063478, 0.021348352432250976, 0.021346303939819337, 0.021257247924804688, 0.021285856246948242, 0.02147225570678711, 0.021311487197875977, 0.02129817581176758, 0.021356544494628905, 0.021321727752685548, 0.021562368392944335, 0.021621759414672852, 0.02251571273803711, 0.022560768127441407, 0.02229555130004883, 0.022179840087890625, 0.02206003189086914, 0.022012928009033202, 0.021967872619628907, 0.02206105613708496, 0.02226688003540039, 0.021485567092895508, 0.021444608688354492, 0.021311487197875977, 0.02147532844543457, 0.02142617607116699, 0.02152448081970215, 0.04532940673828125, 0.021456960678100587, 0.0213636474609375, 0.021334016799926758, 0.021410816192626952, 0.02126540756225586, 0.021332992553710937, 0.021356544494628905, 0.021361663818359376, 0.021425151824951173, 0.021332992553710937, 0.021401599884033205, 0.02150297546386719, 0.021410816192626952, 0.02147635269165039, 0.021433343887329103, 0.021370880126953123, 0.021360639572143555, 0.021415935516357423, 0.02128179168701172, 0.02147225570678711, 0.02146611213684082, 0.021404672622680664, 0.021355520248413085, 0.021335039138793945, 0.02127052879333496, 0.02127462387084961, 0.021300224304199217, 0.02129408073425293, 0.02132275199890137, 0.021327871322631836, 0.021271551132202148, 0.02127462387084961, 0.021307392120361326, 0.02125312042236328, 0.021362688064575194, 0.02129817581176758, 0.0212674560546875, 0.02125721549987793, 0.021336063385009766, 0.021368831634521485, 0.021367807388305664, 0.022182912826538087, 0.02204364776611328, 0.022477823257446287, 0.022156288146972656, 0.022164480209350586, 0.022153215408325197, 0.02215116882324219, 0.022112255096435548, 0.02223308753967285, 0.021774335861206053, 0.021360639572143555, 0.021424127578735352, 0.02147430419921875, 0.021820415496826173, 0.022039552688598633, 0.021996543884277343, 0.021987327575683592, 0.021978111267089845, 0.022215679168701173, 0.021952512741088868, 0.021716991424560548, 0.045139968872070314, 0.02131865692138672, 0.02143337631225586, 0.021398496627807618, 0.021307392120361326, 0.021493759155273438, 0.021393407821655275, 0.021301248550415038, 0.021374975204467773, 0.02145280075073242, 0.021390335083007812, 0.02150297546386719, 0.021489664077758788, 0.021440511703491212, 0.02142720031738281, 0.021526527404785157, 0.021372928619384765, 0.021565439224243164, 0.02143129539489746, 0.02141798400878906, 0.021360639572143555, 0.021386240005493166, 0.02146303939819336, 0.021389312744140625, 0.021300224304199217, 0.021390335083007812, 0.02137500762939453, 0.02170979118347168, 0.021416959762573243, 0.02166169548034668, 0.021412864685058593, 0.02129100799560547, 0.021393407821655275, 0.021315584182739256, 0.02129817581176758, 0.021374975204467773, 0.02126540756225586, 
0.021320703506469727, 0.021309440612792968, 0.021335039138793945, 0.02105958366394043, 0.021311487197875977, 0.021408767700195314, 0.02206208038330078, 0.021833728790283204, 0.021940223693847655, 0.022131711959838866, 0.022004735946655272, 0.022141952514648438, 0.022601728439331056, 0.022064128875732423, 0.022550527572631835, 0.022140928268432617, 0.022034431457519533, 0.022132736206054687, 0.022040576934814454, 0.02220134353637695, 0.022141952514648438, 0.02208563232421875, 0.02213478469848633, 0.022071296691894532, 0.022076416015625, 0.02202828788757324, 0.04676198577880859, 0.022076416015625, 0.022165504455566407, 0.022025215148925782, 0.022189056396484375, 0.022040576934814454, 0.022009855270385743, 0.02191155242919922, 0.021346303939819337, 0.021363712310791014, 0.021751808166503905, 0.022120447158813478, 0.022120447158813478, 0.022024192810058595, 0.022178815841674804, 0.022125568389892578, 0.0214466552734375, 0.021332992553710937, 0.021196832656860353, 0.021298143386840822, 0.02126131248474121, 0.021347328186035155, 0.021440511703491212, 0.021309440612792968, 0.02128998374938965, 0.02130636787414551, 0.02127462387084961, 0.021506048202514647, 0.021967872619628907, 0.02267852783203125, 0.022377471923828125, 0.022228992462158204, 0.021961727142333985, 0.02238057518005371, 0.022480863571166992, 0.022366207122802736, 0.022130687713623046, 0.022410240173339844, 0.022207487106323243, 0.02203647994995117, 0.02210406494140625, 0.022013952255249023, 0.022139904022216796, 0.02206208038330078, 0.022072320938110353, 0.022063104629516602, 0.022055936813354493, 0.022146047592163084, 0.021373952865600586, 0.021384191513061524, 0.0214517765045166, 0.021425151824951173, 0.021285888671875, 0.02165247917175293, 0.02213580894470215, 0.023093248367309572, 0.022350847244262697, 0.021918720245361328, 0.02126950454711914, 0.02141798400878906, 0.021342208862304687, 0.02129100799560547, 0.02127872085571289, 0.04537753677368164, 0.021362688064575194, 0.02168012809753418, 0.021445632934570313, 0.021562368392944335, 0.023380992889404296, 0.02294988822937012, 0.022619136810302733, 0.022610944747924806, 0.022618112564086915, 0.02253107261657715, 0.022600704193115235, 0.022537216186523438, 0.02262937545776367, 0.022548479080200197, 0.02261299133300781, 0.022598655700683593, 0.022401023864746093, 0.02266828727722168, 0.022421503067016603, 0.02246348762512207, 0.02240716743469238, 0.02251263999938965, 0.022780927658081054, 0.02266111946105957, 0.022450176239013672, 0.022542335510253905, 0.02250547218322754, 0.02253107261657715, 0.022403072357177735, 0.02250752067565918, 0.0224399356842041, 0.022510591506958007, 0.022391807556152343, 0.02243071937561035, 0.022434816360473633, 0.022566911697387695, 0.02246451187133789, 0.02249830436706543, 0.022501375198364256, 0.022556671142578123, 0.02242252731323242, 0.02248294448852539, 0.022494207382202147, 0.02264473533630371, 0.022535167694091796, 0.02271027183532715, 0.022768640518188478, 0.02254745674133301, 0.022559743881225586, 0.022617088317871094, 0.022611967086791994, 0.022586368560791017, 0.022608896255493165, 0.022517759323120116, 0.022565887451171874, 0.022566911697387695, 0.02251571273803711, 0.022542335510253905, 0.02262118339538574, 0.022635520935058592, 0.02247065544128418, 0.022392831802368163, 0.04779520034790039, 0.022597631454467772, 0.022558719635009765, 0.02245734405517578, 0.022569984436035157, 0.022467584609985353, 0.02248192024230957, 0.022486015319824217, 0.02246143913269043, 0.02248294448852539, 0.022742015838623047, 0.022492160797119142, 
0.022527008056640624, 0.02257302474975586, 0.022579200744628908, 0.02274508857727051, 0.02345881652832031, 0.023000064849853515, 0.02261299133300781, 0.02246246337890625, 0.02247987174987793, 0.022460416793823244, 0.022501375198364256, 0.022377471923828125, 0.022587392807006838, 0.022579200744628908, 0.022409215927124023, 0.022552576065063477, 0.022545408248901368, 0.022526975631713866, 0.022583295822143554, 0.02240924835205078, 0.02254640007019043, 0.022492191314697266, 0.022502368927001953, 0.02255462455749512, 0.022578176498413087, 0.022533119201660155, 0.022567935943603516, 0.022429695129394533, 0.02269900894165039, 0.022544384002685547, 0.022562816619873048, 0.022451200485229493, 0.022467584609985353, 0.022537216186523438, 0.022425600051879883, 0.0225218563079834, 0.02251571273803711, 0.022526975631713866, 0.022443008422851563, 0.022509567260742186, 0.02247987174987793, 0.022441984176635742, 0.02262019157409668, 0.02277884864807129, 0.022132736206054687, 0.022345727920532226, 0.023601152420043944, 0.022863872528076173, 0.022647808074951172, 0.02253107261657715, 0.02285875129699707, 0.04789452743530274, 0.022619136810302733, 0.022617088317871094, 0.022478847503662108, 0.02246963119506836, 0.022590463638305663, 0.022616064071655274, 0.022584320068359375, 0.02249830436706543, 0.022614015579223632, 0.02253824043273926, 0.022575103759765625, 0.022889471054077147, 0.022776832580566408, 0.022552576065063477, 0.02266316795349121, 0.022708223342895507, 0.022755327224731444, 0.023163904190063478, 0.023447551727294923, 0.022767616271972657, 0.022598655700683593, 0.022563840866088865, 0.022608896255493165, 0.022380544662475587, 0.022608896255493165, 0.022518783569335937, 0.02244915199279785, 0.022536224365234375, 0.022473695755004883, 0.02252390480041504, 0.022567935943603516, 0.02270412826538086, 0.022590463638305663, 0.022618112564086915, 0.022537216186523438, 0.022511615753173828, 0.022529024124145508, 0.022445056915283205, 0.022580223083496095, 0.022503423690795898, 0.022542335510253905, 0.022564863204956053, 0.02249728012084961, 0.022584320068359375, 0.022634496688842775, 0.022619136810302733, 0.022861824035644532, 0.02261299133300781, 0.022543359756469726, 0.02264575958251953, 0.022572032928466795, 0.022399999618530272, 0.022557695388793944, 0.022495231628417968, 0.022634496688842775, 0.02253107261657715, 0.022536191940307617, 0.02249625587463379, 0.0224716796875, 0.022382591247558595, 0.022590463638305663, 0.02266012763977051, 0.0479508171081543, 0.022475776672363282, 0.02249625587463379, 0.02250547218322754, 0.02250547218322754, 0.022533119201660155, 0.02248089599609375, 0.022503423690795898, 0.022444032669067384, 0.022507551193237305, 0.022583263397216797, 0.02248089599609375, 0.022525951385498046, 0.022509567260742186, 0.022599679946899414, 0.022622207641601562, 0.022597631454467772, 0.022395904541015626, 0.022401023864746093, 0.02250547218322754, 0.022385663986206054, 0.022564863204956053, 0.02261299133300781, 0.02285260772705078, 0.022624256134033204, 0.0225167350769043, 0.022809600830078124, 0.022617088317871094, 0.022585344314575196, 0.022565887451171874, 0.022597631454467772, 0.022520832061767578, 0.022486015319824217, 0.022468608856201173, 0.022527999877929687, 0.022373376846313478, 0.02243891143798828, 0.022764575958251952, 0.022584287643432618, 0.022425600051879883, 0.02244607925415039, 0.022509567260742186, 0.02246451187133789, 0.0224532470703125, 0.022565887451171874, 0.02253926467895508, 0.02261299133300781, 0.022559743881225586, 0.022495264053344725, 0.022618080139160158, 
0.022915071487426757, 0.02374553680419922, 0.022756351470947265, 0.02259660720825195, 0.022460416793823244, 0.022443008422851563, 0.02251571273803711, 0.022524927139282228, 0.022544384002685547, 0.022533119201660155, 0.022412288665771486, 0.022402048110961914, 0.02247987174987793]",tokens/s,44.522556664177316,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-70m,EleutherAI/pythia-70m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) 
File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 172, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm-20b,internlm/internlm-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", 
line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 976, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 866, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 583, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 339, in forward query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Meta-Llama-3-8B,meta-llama/Meta-Llama-3-8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 327, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-350m,facebook/opt-350m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 
2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 1117, in forward outputs = self.model.decoder( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 835, in forward inputs_embeds = self.project_in(inputs_embeds) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-rw-1b,tiiuae/falcon-rw-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-rw-1b/e4b9872bb803165eb22f0a867d4e6a64d34fce19/modeling_falcon.py"", line 900, in forward transformer_outputs = self.transformer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-rw-1b/e4b9872bb803165eb22f0a867d4e6a64d34fce19/modeling_falcon.py"", line 797, in forward outputs = block( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-rw-1b/e4b9872bb803165eb22f0a867d4e6a64d34fce19/modeling_falcon.py"", line 453, in forward attn_outputs = self.self_attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/tiiuae/falcon-rw-1b/e4b9872bb803165eb22f0a867d4e6a64d34fce19/modeling_falcon.py"", line 291, in forward fused_qkv = self.query_key_value(hidden_states) # [batch_size, seq_length, 3 x hidden_size] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-125m,facebook/opt-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 1117, in forward outputs = self.model.decoder( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 883, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 524, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 154, in forward query_states = self.q_proj(hidden_states) * self.scaling File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-4.5B,facebook/xglm-4.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/xglm/modeling_xglm.py"", line 761, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/xglm/modeling_xglm.py"", line 647, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/xglm/modeling_xglm.py"", line 414, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/xglm/modeling_xglm.py"", line 244, in forward query_states = self.q_proj(hidden_states) * self.scaling File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,x,x,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/x/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e 
huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a31e6-05b44ba805f28e4c0e138304;c686977a-2616-4623-8b94-3e6c3eba03d2) Repository Not Found for url: https://huggingface.co/x/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: x is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-3b-4e1t,stabilityai/stablelm-3b-4e1t,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target 
report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 1158, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 1035, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 759, in forward self_attn_output, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 317, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-2-1_6b,stabilityai/stablelm-2-1_6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 1158, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 1035, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 759, in forward self_attn_output, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/stablelm/modeling_stablelm.py"", line 317, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-66b,facebook/opt-66b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File 
""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 162.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-180B,tiiuae/falcon-180B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/tiiuae/falcon-180B/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-66779473-454e3284237af0d811614cc0;0eaab056-8bce-4aca-a35c-b78fe6f4b710) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/tiiuae/falcon-180B/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like tiiuae/falcon-180B is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-1.3B,EleutherAI/gpt-neo-1.3B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 971, in forward transformer_outputs = self.transformer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 839, in forward outputs = block( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 566, in forward attn_outputs = self.attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 518, in forward return self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 260, in forward query = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-6.7b,EleutherAI/pythia-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return 
self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 172, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-7B,Qwen/Qwen1.5-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 249, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.4b,EleutherAI/pythia-1.4b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 172, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-13b,facebook/opt-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 1117, in forward outputs = self.model.decoder( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 883, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 524, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 154, in forward query_states = self.q_proj(hidden_states) * self.scaling File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,8,8,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/8/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in 
_hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a318b-5ed968a505a8ffb3188f2884;7f2028b7-c9c7-4190-b9c4-d859288a2a07) Repository Not Found for url: https://huggingface.co/8/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 8 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-72B,Qwen/Qwen-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,databricks/dbrx-base,databricks/dbrx-base,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cbe48-2bbbeb3752f26bbe51221d0b;c39fa5d3-9854-4ec3-b91e-11ae47f29459) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like databricks/dbrx-base is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,microsoft/rho-math-1b-v0.1,microsoft/rho-math-1b-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", 
line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 327, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,openai-community/gpt2,openai-community/gpt2,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1234.579456,1005.060096,0.0,358.612992,318.913024,s,21,0.18631392097473143,0.008872091474987211,0.000924222773129299,0.008636768341064454,0.008913311958312988,0.009082719802856446,0.012191328048706058,"[0.012968480110168457, 0.008715680122375488, 0.008649439811706542, 0.008653440475463868, 0.008547360420227051, 0.008636223793029784, 0.00852006435394287, 0.008628000259399414, 0.008578911781311035, 0.008601471900939942, 0.008556415557861328, 0.00872332763671875, 0.008607872009277344, 0.008735967636108398, 0.008636768341064454, 0.008690496444702149, 0.008913311958312988, 0.00862070369720459, 0.008642592430114747, 0.009082719802856446, 0.008604672431945801]",tokens/s,28854.526660566135,kWh,1.0560125751725471e-07,5.786425132276262e-08,2.2601994052193067e-07,3.89485449361948e-07,tokens/kWh,657277442.3778274,MB,1234.579456,1005.060096,0.0,358.612992,328.809472,s,21,10.324910400390625,0.4916624000186012,0.00544805281141677,0.49037628173828124,0.4947781066894531,0.5050043334960937,0.5079629760742188,"[0.50870263671875, 0.4930988159179687, 0.48819091796875, 0.4877738342285156, 0.4884733581542969, 0.48753598022460937, 0.488943115234375, 0.48698284912109374, 0.48965280151367185, 0.4866800231933594, 0.49037628173828124, 0.4889322509765625, 0.4905810546875, 0.48850283813476564, 0.4947781066894531, 0.49458047485351564, 0.4923774108886719, 0.4912550964355469, 0.49059906005859377, 0.49188916015625, 
0.5050043334960937]",tokens/s,128.13670518147515,kWh,5.982969098851764e-06,3.278237305428838e-06,1.0177446715578437e-05,1.9438653119859037e-05,tokens/kWh,3240965.287643183,,s,1322,10.486766648292543,0.007932501246817355,0.0011072131495569327,0.007730175971984863,0.008002457332611084,0.008157030057907104,0.016623786373138428,"[0.011603967666625976, 0.010896384239196777, 0.009056256294250489, 0.008364031791687012, 0.007814144134521485, 0.007742464065551758, 0.0078919677734375, 0.007983104228973388, 0.007925759792327881, 0.007923711776733398, 0.007890944004058837, 0.00773529577255249, 0.00810598373413086, 0.007941120147705078, 0.007896063804626464, 0.00778547191619873, 0.0077209601402282715, 0.007713791847229004, 0.00801689624786377, 0.007914495944976807, 0.00795750379562378, 0.007947264194488525, 0.007755775928497314, 0.007707647800445557, 0.007718912124633789, 0.0077281279563903805, 0.007827455997467042, 0.0077281279563903805, 0.007740416049957275, 0.00769536018371582, 0.007731200218200684, 0.0077281279563903805, 0.0077578239440917966, 0.007866432189941406, 0.007905216217041015, 0.007806975841522217, 0.007733280181884765, 0.007848959922790527, 0.007939040184020996, 0.007905280113220215, 0.007763967990875244, 0.007702527999877929, 0.007729152202606201, 0.0076871681213378906, 0.007738368034362793, 0.007699456214904785, 0.00774451208114624, 0.007715839862823487, 0.007705599784851074, 0.007665664196014404, 0.007705599784851074, 0.007772160053253174, 0.00832307243347168, 0.008374272346496582, 0.008364031791687012, 0.008137727737426758, 0.008676351547241211, 0.008647680282592773, 0.008061951637268066, 0.007941120147705078, 0.007804927825927735, 0.007699456214904785, 0.016753664016723634, 0.007749631881713868, 0.007865344047546387, 0.007974912166595459, 0.008176639556884765, 0.008267775535583496, 0.008225791931152344, 0.008252415657043457, 0.008347647666931152, 0.008347647666931152, 0.008121343612670898, 0.00808140754699707, 0.00795750379562378, 0.007962624073028564, 0.007906303882598878, 0.00773529577255249, 0.007738368034362793, 0.007763967990875244, 0.007729152202606201, 0.0077506561279296875, 0.0077281279563903805, 0.007724031925201416, 0.007824384212493896, 0.007721983909606934, 0.007715839862823487, 0.007718912124633789, 0.007739391803741455, 0.00787660789489746, 0.007906303882598878, 0.0078919677734375, 0.007863296031951903, 0.0077281279563903805, 0.007847936153411865, 0.007693312168121338, 0.007734272003173828, 0.007749631881713868, 0.007703551769256592, 0.007707680225372314, 0.007713791847229004, 0.00769532823562622, 0.007730175971984863, 0.007704576015472412, 0.007762944221496582, 0.007703551769256592, 0.007713791847229004, 0.007731200218200684, 0.0077209601402282715, 0.007721983909606934, 0.0076912641525268555, 0.007723008155822754, 0.007797760009765625, 0.007732223987579345, 0.007863296031951903, 0.007836671829223632, 0.007715839862823487, 0.007715839862823487, 0.0076984319686889645, 0.007693312168121338, 0.007702527999877929, 0.007692287921905518, 0.007733248233795166, 0.007749631881713868, 0.007714816093444824, 0.016701440811157226, 0.007828479766845703, 0.0077547521591186525, 0.007716864109039307, 0.007701504230499268, 0.007740416049957275, 0.007717887878417969, 0.007721983909606934, 0.007828479766845703, 0.007741471767425537, 0.007738336086273194, 0.007726079940795898, 0.00785100793838501, 0.007770112037658691, 0.007825407981872558, 0.0077506561279296875, 0.0077506561279296875, 0.007702527999877929, 0.007725056171417236, 0.007788544178009033, 0.007708672046661377, 
0.007740416049957275, 0.00773529577255249, 0.0077209601402282715, 0.007741439819335938, 0.007802879810333252, 0.00779366397857666, 0.007837696075439453, 0.007726079940795898, 0.007718912124633789, 0.007726079940795898, 0.007703584194183349, 0.007741407871246338, 0.007717887878417969, 0.007705599784851074, 0.007717887878417969, 0.007699456214904785, 0.007767039775848389, 0.007716864109039307, 0.007705599784851074, 0.007756800174713135, 0.007726079940795898, 0.007723008155822754, 0.00773632001876831, 0.0077333121299743655, 0.007885759830474853, 0.00774348783493042, 0.007778304100036621, 0.007740416049957275, 0.007734335899353027, 0.0077424001693725585, 0.007721983909606934, 0.007734272003173828, 0.007772160053253174, 0.007711743831634522, 0.007724031925201416, 0.007748608112335205, 0.007730175971984863, 0.0077281279563903805, 0.007767039775848389, 0.007715839862823487, 0.007709695816040039, 0.007712768077850342, 0.016694271087646484, 0.007724031925201416, 0.00775270414352417, 0.007763967990875244, 0.0077578239440917966, 0.007700479984283447, 0.0077281279563903805, 0.007669760227203369, 0.007833600044250488, 0.007715839862823487, 0.007715839862823487, 0.007729152202606201, 0.007787519931793213, 0.00773529577255249, 0.007711743831634522, 0.00774451208114624, 0.007697408199310303, 0.007684095859527588, 0.007714816093444824, 0.0076984319686889645, 0.007693312168121338, 0.007684095859527588, 0.007703551769256592, 0.007802879810333252, 0.007802879810333252, 0.007792640209197998, 0.00774348783493042, 0.007737343788146973, 0.007920639991760254, 0.007919616222381591, 0.007796735763549805, 0.007738431930541992, 0.007714752197265625, 0.0077209601402282715, 0.007729152202606201, 0.007764992237091065, 0.007960576057434082, 0.007717887878417969, 0.007692287921905518, 0.007679999828338623, 0.007699456214904785, 0.007741471767425537, 0.007738336086273194, 0.007733248233795166, 0.007748608112335205, 0.007697408199310303, 0.007705599784851074, 0.007739391803741455, 0.007689216136932373, 0.007723008155822754, 0.007702527999877929, 0.007710720062255859, 0.007726079940795898, 0.007693312168121338, 0.007741439819335938, 0.007713791847229004, 0.00773529577255249, 0.0076943359375, 0.007782400131225586, 0.007725056171417236, 0.007685120105743408, 0.007715839862823487, 0.007738368034362793, 0.016563199996948243, 0.0077547521591186525, 0.007727104187011719, 0.007756800174713135, 0.0076984319686889645, 0.00783564805984497, 0.007682047843933106, 0.00773632001876831, 0.0076984319686889645, 0.007762944221496582, 0.007725056171417236, 0.007751679897308349, 0.007710720062255859, 0.007872511863708496, 0.007729152202606201, 0.00774451208114624, 0.007726079940795898, 0.007779327869415284, 0.00779366397857666, 0.007710720062255859, 0.007713791847229004, 0.007693312168121338, 0.007716864109039307, 0.007709695816040039, 0.007707647800445557, 0.007715839862823487, 0.007712768077850342, 0.0076871681213378906, 0.00773529577255249, 0.007710720062255859, 0.007712768077850342, 0.007706624031066894, 0.0076902399063110355, 0.007711743831634522, 0.007706624031066894, 0.007689216136932373, 0.007766016006469726, 0.0076912641525268555, 0.007726079940795898, 0.007915520191192627, 0.00795750379562378, 0.007804927825927735, 0.007710720062255859, 0.007705599784851074, 0.007717887878417969, 0.007700479984283447, 0.0076943359375, 0.008194047927856446, 0.008091648101806641, 0.007954432010650634, 0.00774451208114624, 0.007711743831634522, 0.007692319869995117, 0.007700448036193848, 0.007789567947387695, 0.007700479984283447, 0.007714816093444824, 
0.007712768077850342, 0.007706624031066894, 0.007718912124633789, 0.007716864109039307, 0.0077292160987854006, 0.007698368072509765, 0.016657407760620118, 0.007730175971984863, 0.007724031925201416, 0.0078438401222229, 0.00769536018371582, 0.007745535850524903, 0.007696383953094482, 0.007697408199310303, 0.007726079940795898, 0.00785203218460083, 0.00775270414352417, 0.007676928043365478, 0.007704576015472412, 0.00774348783493042, 0.007731200218200684, 0.007701504230499268, 0.007804927825927735, 0.007708672046661377, 0.007710720062255859, 0.007707647800445557, 0.007711743831634522, 0.007700479984283447, 0.007700479984283447, 0.007699456214904785, 0.007712768077850342, 0.007697472095489502, 0.007697343826293945, 0.0076871681213378906, 0.007741471767425537, 0.007927775859832764, 0.00779366397857666, 0.00778547191619873, 0.007738368034362793, 0.00773529577255249, 0.007708672046661377, 0.007841792106628418, 0.007705599784851074, 0.007713791847229004, 0.007704576015472412, 0.007732223987579345, 0.007716928005218506, 0.007747519969940186, 0.007733248233795166, 0.007702527999877929, 0.007742464065551758, 0.007692287921905518, 0.007708672046661377, 0.007714848041534424, 0.007716832160949707, 0.007709695816040039, 0.007716864109039307, 0.0077209601402282715, 0.007709695816040039, 0.0077506561279296875, 0.0077578239440917966, 0.007712768077850342, 0.007811071872711181, 0.007751679897308349, 0.007782400131225586, 0.007685120105743408, 0.007799808025360107, 0.007716864109039307, 0.007768064022064209, 0.016578559875488282, 0.0077199358940124516, 0.007724031925201416, 0.007730175971984863, 0.007751679897308349, 0.007838719844818116, 0.007721983909606934, 0.007738368034362793, 0.007946239948272706, 0.007903232097625732, 0.007706624031066894, 0.007726079940795898, 0.007748608112335205, 0.007771135807037354, 0.007941120147705078, 0.00794220781326294, 0.007843776226043701, 0.007714816093444824, 0.007815167903900147, 0.007742464065551758, 0.007722015857696533, 0.007703519821166992, 0.007739391803741455, 0.007772160053253174, 0.007702527999877929, 0.007701504230499268, 0.007732223987579345, 0.007699456214904785, 0.008051775932312011, 0.008126399993896484, 0.007746560096740723, 0.0077281279563903805, 0.007717887878417969, 0.0076943359375, 0.0076871681213378906, 0.007692287921905518, 0.007706624031066894, 0.007808000087738037, 0.0077209601402282715, 0.007701504230499268, 0.007711743831634522, 0.0078438401222229, 0.007739391803741455, 0.007706624031066894, 0.007716864109039307, 0.007707647800445557, 0.007768064022064209, 0.007886847972869874, 0.007692287921905518, 0.007712768077850342, 0.007726079940795898, 0.007709695816040039, 0.007706624031066894, 0.00773529577255249, 0.007704576015472412, 0.00773529577255249, 0.007718912124633789, 0.007711743831634522, 0.007676928043365478, 0.0076871681213378906, 0.007708703994750977, 0.007705567836761474, 0.007707647800445557, 0.016647167205810547, 0.007724031925201416, 0.007713791847229004, 0.007688191890716553, 0.007745600223541259, 0.007724991798400879, 0.007704576015472412, 0.007768064022064209, 0.007717887878417969, 0.007707647800445557, 0.007766016006469726, 0.007725056171417236, 0.00769536018371582, 0.00769536018371582, 0.007714816093444824, 0.007707647800445557, 0.007798783779144287, 0.00790118408203125, 0.00781004810333252, 0.0076574721336364745, 0.007778304100036621, 0.007678976058959961, 0.007707647800445557, 0.0077209601402282715, 0.007697408199310303, 0.007703551769256592, 0.0076902399063110355, 0.007699456214904785, 0.007769087791442871, 0.0077844481468200685, 
0.007714816093444824, 0.007693312168121338, 0.0077209601402282715, 0.007701504230499268, 0.0077578239440917966, 0.007845888137817383, 0.007723008155822754, 0.007715839862823487, 0.007715839862823487, 0.007713791847229004, 0.0076943359375, 0.007692287921905518, 0.007703551769256592, 0.0076943359375, 0.007688191890716553, 0.007770112037658691, 0.0076912641525268555, 0.007683072090148926, 0.007733248233795166, 0.0076984319686889645, 0.007782400131225586, 0.007789567947387695, 0.007717919826507568, 0.007701471805572509, 0.007716864109039307, 0.007721983909606934, 0.007721983909606934, 0.007702527999877929, 0.0077547521591186525, 0.007731200218200684, 0.0076943359375, 0.007699456214904785, 0.007689216136932373, 0.016616479873657226, 0.007721951961517334, 0.0076984319686889645, 0.007682079792022705, 0.0077536959648132325, 0.007910399913787843, 0.0077199358940124516, 0.007729152202606201, 0.007723008155822754, 0.007775231838226319, 0.007875584125518798, 0.007718912124633789, 0.007697408199310303, 0.007702527999877929, 0.007699456214904785, 0.007777279853820801, 0.008135680198669434, 0.007952383995056152, 0.007699456214904785, 0.007688191890716553, 0.0076912641525268555, 0.007688191890716553, 0.007723008155822754, 0.007730175971984863, 0.007710720062255859, 0.00773529577255249, 0.007718912124633789, 0.0076912641525268555, 0.007692319869995117, 0.0076973757743835445, 0.007742464065551758, 0.007873536109924317, 0.007718912124633789, 0.008615936279296875, 0.007956480026245117, 0.007966720104217529, 0.007947264194488525, 0.007872511863708496, 0.0077608962059021, 0.007699456214904785, 0.007715839862823487, 0.00769536018371582, 0.007725056171417236, 0.007732223987579345, 0.007871488094329833, 0.007798783779144287, 0.0077209601402282715, 0.007711743831634522, 0.007717887878417969, 0.0077281279563903805, 0.007717887878417969, 0.007701504230499268, 0.007701504230499268, 0.007703551769256592, 0.007751679897308349, 0.007730175971984863, 0.007711743831634522, 0.0076984319686889645, 0.007762944221496582, 0.007710720062255859, 0.00773632001876831, 0.007712800025939942, 0.007716832160949707, 0.01661030387878418, 0.007712768077850342, 0.007726079940795898, 0.007716864109039307, 0.007732223987579345, 0.007729152202606201, 0.007705599784851074, 0.007732223987579345, 0.007708672046661377, 0.007714816093444824, 0.007675903797149658, 0.007910399913787843, 0.007725056171417236, 0.007729152202606201, 0.007730175971984863, 0.007763967990875244, 0.007731200218200684, 0.007730175971984863, 0.0076871681213378906, 0.007688191890716553, 0.007684095859527588, 0.007711743831634522, 0.0076687359809875484, 0.007706624031066894, 0.007774208068847656, 0.007678976058959961, 0.007700479984283447, 0.0076902399063110355, 0.007696447849273681, 0.007699391841888428, 0.007823359966278077, 0.007854080200195313, 0.007693312168121338, 0.007725056171417236, 0.007701504230499268, 0.00783564805984497, 0.007677951812744141, 0.007689216136932373, 0.007685120105743408, 0.007682047843933106, 0.007734272003173828, 0.007696383953094482, 0.007682047843933106, 0.007710720062255859, 0.007696383953094482, 0.007702527999877929, 0.0076984319686889645, 0.00775270414352417, 0.007681024074554443, 0.007682047843933106, 0.007686143875122071, 0.007697408199310303, 0.0077199358940124516, 0.007731200218200684, 0.007727104187011719, 0.007754784107208252, 0.0077209281921386715, 0.007737343788146973, 0.007764992237091065, 0.007710720062255859, 0.007709695816040039, 0.007701504230499268, 0.007770112037658691, 0.016547840118408205, 0.007730175971984863, 
0.007739391803741455, 0.007730175971984863, 0.007729152202606201, 0.00789299201965332, 0.0077619199752807615, 0.007703551769256592, 0.007711743831634522, 0.007745535850524903, 0.007731200218200684, 0.0078438401222229, 0.007710720062255859, 0.0077619199752807615, 0.007710720062255859, 0.007699456214904785, 0.0077281279563903805, 0.008186944007873535, 0.008094655990600585, 0.007806975841522217, 0.007730175971984863, 0.00773529577255249, 0.007699456214904785, 0.0077199358940124516, 0.007721983909606934, 0.007737343788146973, 0.007670783996582031, 0.007726079940795898, 0.007745535850524903, 0.007716864109039307, 0.007806975841522217, 0.007882751941680909, 0.007704576015472412, 0.007703551769256592, 0.0077209601402282715, 0.007707647800445557, 0.007676928043365478, 0.008251456260681153, 0.008133567810058594, 0.007824384212493896, 0.007700479984283447, 0.0076984319686889645, 0.0077209601402282715, 0.007747583866119385, 0.007709695816040039, 0.007673855781555176, 0.007715839862823487, 0.007702527999877929, 0.007711743831634522, 0.008233983993530274, 0.007969791889190675, 0.007902207851409913, 0.00793497610092163, 0.007878655910491944, 0.007746560096740723, 0.007704576015472412, 0.007705599784851074, 0.007717887878417969, 0.007718912124633789, 0.007718944072723389, 0.0077291841506958005, 0.00770246410369873, 0.007727104187011719, 0.01676288032531738, 0.007725056171417236, 0.007721983909606934, 0.007742464065551758, 0.00769536018371582, 0.007717887878417969, 0.007709760189056397, 0.007786431789398193, 0.007712768077850342, 0.007718912124633789, 0.007725056171417236, 0.007823359966278077, 0.007729152202606201, 0.007708672046661377, 0.007699456214904785, 0.007780352115631104, 0.007726079940795898, 0.007726079940795898, 0.007740416049957275, 0.007729152202606201, 0.007700479984283447, 0.007715839862823487, 0.007773183822631836, 0.007751679897308349, 0.0077199358940124516, 0.007716864109039307, 0.007704576015472412, 0.007727104187011719, 0.007710720062255859, 0.007731200218200684, 0.007723008155822754, 0.007889920234680176, 0.00790937614440918, 0.008008735656738281, 0.00790012788772583, 0.007946239948272706, 0.00785100793838501, 0.007679999828338623, 0.007686143875122071, 0.007853055953979492, 0.00769536018371582, 0.007714816093444824, 0.007721983909606934, 0.007976960182189942, 0.007935999870300293, 0.007832575798034667, 0.007708672046661377, 0.007726079940795898, 0.007707647800445557, 0.007676928043365478, 0.007708672046661377, 0.00773632001876831, 0.007734272003173828, 0.007734272003173828, 0.007725056171417236, 0.007711743831634522, 0.00774451208114624, 0.007724031925201416, 0.00780185604095459, 0.007727104187011719, 0.0077281279563903805, 0.007778304100036621, 0.007770112037658691, 0.01660211181640625, 0.007740416049957275, 0.007758848190307617, 0.007717887878417969, 0.0076984319686889645, 0.007717887878417969, 0.007723008155822754, 0.007696383953094482, 0.00782643222808838, 0.0076902399063110355, 0.007689216136932373, 0.007712768077850342, 0.00794316816329956, 0.007795711994171142, 0.007724031925201416, 0.007756800174713135, 0.007738368034362793, 0.007890944004058837, 0.007928832054138184, 0.007711743831634522, 0.007721983909606934, 0.007827455997467042, 0.007703551769256592, 0.007713791847229004, 0.007709695816040039, 0.007723008155822754, 0.007705599784851074, 0.007701504230499268, 0.00779366397857666, 0.007730175971984863, 0.007710720062255859, 0.007721983909606934, 0.00773632001876831, 0.007712768077850342, 0.007732223987579345, 0.007749695777893067, 0.007699391841888428, 
0.007712768077850342, 0.00769536018371582, 0.007714816093444824, 0.007675903797149658, 0.007679999828338623, 0.007706624031066894, 0.007713791847229004, 0.007707647800445557, 0.007725056171417236, 0.0077199358940124516, 0.0077199358940124516, 0.007711743831634522, 0.007701504230499268, 0.008041472434997558, 0.007925759792327881, 0.007964672088623047, 0.008812543869018554, 0.00818892765045166, 0.007971839904785156, 0.007931903839111328, 0.00797388792037964, 0.007821311950683594, 0.00773632001876831, 0.007737343788146973, 0.0076984319686889645, 0.0077506561279296875, 0.016773120880126953, 0.007713791847229004, 0.007703551769256592, 0.007712768077850342, 0.0077199358940124516, 0.007724031925201416, 0.007887872219085693, 0.007883776187896728, 0.007776256084442139, 0.0076943359375, 0.007706655979156494, 0.007736288070678711, 0.007830527782440186, 0.0077619199752807615, 0.0077281279563903805, 0.007740416049957275, 0.007749631881713868, 0.007731200218200684, 0.007759871959686279, 0.007758848190307617, 0.007709695816040039, 0.0077209601402282715, 0.007706624031066894, 0.007707647800445557, 0.007697408199310303, 0.007702527999877929, 0.0077209601402282715, 0.00769536018371582, 0.007708672046661377, 0.007721983909606934, 0.008154111862182617, 0.008143872261047362, 0.007910399913787843, 0.007795711994171142, 0.007726079940795898, 0.007689280033111572, 0.007698368072509765, 0.007763967990875244, 0.007821311950683594, 0.007713791847229004, 0.007711743831634522, 0.007733248233795166, 0.007748608112335205, 0.007656447887420655, 0.00775270414352417, 0.007725056171417236, 0.0077281279563903805, 0.007710720062255859, 0.007682047843933106, 0.007711743831634522, 0.007732223987579345, 0.007700479984283447, 0.007749631881713868, 0.00773632001876831, 0.007706624031066894, 0.007697408199310303, 0.00774348783493042, 0.00773632001876831, 0.007778304100036621, 0.00773632001876831, 0.007763967990875244, 0.007724031925201416, 0.007725056171417236, 0.01663488006591797, 0.0077281279563903805, 0.0077292160987854006, 0.00771782398223877, 0.007703551769256592, 0.007724031925201416, 0.007721983909606934, 0.007717887878417969, 0.007869440078735352, 0.007717887878417969, 0.007703551769256592, 0.007697408199310303, 0.00781824016571045, 0.007864319801330566, 0.007912447929382324, 0.007799808025360107, 0.007717887878417969, 0.007748608112335205, 0.007753727912902832, 0.007782400131225586, 0.007894015789031983, 0.007772160053253174, 0.007773183822631836, 0.007839744091033935, 0.007825407981872558, 0.007860223770141601, 0.007705599784851074, 0.007798783779144287, 0.007741439819335938, 0.007748608112335205, 0.007869440078735352, 0.0077209601402282715, 0.007725056171417236, 0.007706624031066894, 0.0076984319686889645, 0.007726079940795898, 0.007718912124633789, 0.007711743831634522, 0.0077547521591186525, 0.0077281279563903805, 0.007716864109039307, 0.0076943359375, 0.007706624031066894, 0.007708735942840576, 0.008207296371459961, 0.00828006362915039, 0.008375295639038086, 0.008964159965515136, 0.008193984031677246, 0.008244223594665527, 0.008076288223266602, 0.008083456039428711, 0.008111104011535645, 0.008119296073913575, 0.007963647842407226, 0.007763967990875244, 0.007849984169006348, 0.007905344009399413, 0.00797382402420044, 0.007958528041839599, 0.007767039775848389, 0.007678976058959961, 0.007705599784851074, 0.016735231399536133, 0.007749631881713868, 0.007739391803741455, 0.007732223987579345, 0.007741439819335938, 0.007729152202606201, 0.007734272003173828, 0.007770112037658691, 0.007745535850524903, 0.007739391803741455, 
0.0076912641525268555, 0.007707680225372314, 0.0077803201675415035, 0.007710720062255859, 0.007718912124633789, 0.00785920000076294, 0.007742464065551758, 0.007711743831634522, 0.007757855892181397, 0.007923679828643799, 0.00797388792037964, 0.007967743873596191, 0.007703551769256592, 0.007781375885009765, 0.007738368034362793, 0.0077199358940124516, 0.0077199358940124516, 0.00773632001876831, 0.007770112037658691, 0.007689216136932373, 0.007716864109039307, 0.00784281587600708, 0.007911424160003662, 0.007947264194488525, 0.007904255867004394, 0.00787766408920288, 0.007705567836761474, 0.007828479766845703, 0.007711743831634522, 0.007731200218200684, 0.007724031925201416, 0.007885824203491211, 0.0077281279563903805, 0.007792640209197998, 0.00790937614440918, 0.007910399913787843, 0.00787660789489746, 0.007709695816040039, 0.007669760227203369, 0.007706655979156494, 0.007722976207733154, 0.007747583866119385, 0.009075712203979493, 0.0091146240234375, 0.008141823768615723, 0.007981056213378907, 0.007968768119812012, 0.007946239948272706, 0.008045568466186523, 0.007699456214904785, 0.00791756820678711, 0.00808243179321289, 0.007932928085327149, 0.01699430465698242, 0.007724031925201416, 0.007711743831634522, 0.007674880027770996, 0.007733248233795166, 0.007865344047546387, 0.008018943786621094, 0.007984127998352051, 0.007921664237976075, 0.0078919677734375, 0.007894015789031983, 0.007947264194488525, 0.0077209601402282715, 0.007711743831634522, 0.007716864109039307, 0.007726079940795898, 0.008037376403808593, 0.008160256385803222, 0.008114175796508789, 0.007946239948272706, 0.007955455780029297, 0.007880703926086426, 0.007709695816040039, 0.0076902399063110355, 0.007707647800445557, 0.007819263935089112, 0.0077066879272460935, 0.0077127041816711425, 0.007713791847229004, 0.007715839862823487, 0.007741439819335938, 0.007872511863708496, 0.008002559661865234, 0.007911424160003662, 0.007808000087738037, 0.007950335979461669, 0.007971839904785156, 0.007889920234680176, 0.007895040035247802, 0.007922688007354736, 0.007844863891601562, 0.007697408199310303, 0.007679999828338623, 0.007716864109039307, 0.007702527999877929, 0.0077547521591186525, 0.007723008155822754, 0.007700479984283447, 0.0076943359375, 0.007708672046661377, 0.00769536018371582, 0.007707647800445557, 0.0076902399063110355, 0.0076943359375, 0.007699456214904785, 0.007935999870300293, 0.007902207851409913, 0.007918591976165772, 0.00793398380279541, 0.007732192039489746, 0.007676928043365478, 0.007679999828338623, 0.007780352115631104, 0.016642080307006837, 0.007711711883544922, 0.007697408199310303, 0.007676928043365478, 0.007706624031066894, 0.007731200218200684, 0.007718976020812988, 0.00803116798400879, 0.007894015789031983, 0.007731200218200684, 0.007890944004058837, 0.008027135848999023, 0.008517631530761719, 0.007713791847229004, 0.007714816093444824, 0.00786947202682495, 0.007754720211029052, 0.007733248233795166, 0.007702527999877929, 0.007700479984283447, 0.007743519783020019, 0.007692255973815918, 0.007667712211608887, 0.007686143875122071, 0.007748608112335205, 0.007677951812744141, 0.007717887878417969, 0.007771135807037354, 0.007829504013061523, 0.007766016006469726, 0.00800153636932373, 0.007898111820220948, 0.007880703926086426, 0.007862271785736084, 0.007678016185760498, 0.007819200038909912, 0.007671840190887451, 0.007682015895843506, 0.007673855781555176, 0.007704576015472412, 0.0076871681213378906, 0.007699456214904785, 0.007682047843933106, 0.007710720062255859, 0.007718944072723389, 0.0076902079582214355, 
0.007753727912902832, 0.007715839862823487, 0.007767039775848389, 0.007705599784851074, 0.007976960182189942, 0.007907328128814697, 0.007895040035247802, 0.007870463848114014, 0.008042495727539062, 0.007890975952148438, 0.007929823875427245, 0.007815167903900147, 0.008281087875366211, 0.007686143875122071, 0.007705599784851074, 0.007724031925201416, 0.00773632001876831, 0.01665023994445801, 0.0076984319686889645, 0.007708672046661377, 0.007708672046661377, 0.007717887878417969, 0.007837696075439453, 0.007702527999877929, 0.00774451208114624, 0.007753727912902832, 0.007738368034362793, 0.007723008155822754, 0.007798783779144287, 0.007696383953094482, 0.007700479984283447, 0.007701504230499268, 0.007718912124633789, 0.007715839862823487, 0.007762944221496582, 0.00785203218460083, 0.007715839862823487, 0.007769087791442871, 0.007730175971984863, 0.007700479984283447, 0.0077199358940124516, 0.007688191890716553, 0.007705599784851074, 0.0076943359375, 0.007681024074554443, 0.0076984319686889645, 0.007692287921905518, 0.007710720062255859, 0.007753727912902832, 0.00773529577255249, 0.007738368034362793, 0.007740416049957275, 0.007705599784851074, 0.0077281279563903805, 0.007871488094329833, 0.007733248233795166, 0.007703551769256592, 0.007703551769256592, 0.007701504230499268, 0.007677951812744141, 0.007689216136932373, 0.007725056171417236, 0.007723008155822754, 0.007781375885009765, 0.007701504230499268, 0.007701504230499268, 0.00852684783935547, 0.0082227201461792, 0.007988224029541016, 0.00809881591796875, 0.007950335979461669, 0.007958528041839599, 0.007836671829223632, 0.007874559879302979, 0.007763967990875244, 0.007967743873596191, 0.0078438401222229, 0.007737343788146973, 0.007927807807922363, 0.00799948787689209, 0.017304576873779298, 0.0077199358940124516, 0.007718912124633789, 0.007681024074554443, 0.0076871681213378906, 0.007727104187011719, 0.0076984319686889645, 0.007781375885009765, 0.007724031925201416, 0.007789567947387695, 0.007988224029541016, 0.008054783821105957, 0.007925759792327881, 0.00790937614440918, 0.00774348783493042, 0.007699456214904785, 0.00774348783493042, 0.007709695816040039, 0.007708672046661377, 0.007677951812744141, 0.00774348783493042, 0.007771135807037354, 0.007776256084442139, 0.007746560096740723, 0.00773529577255249, 0.007714816093444824, 0.007729152202606201, 0.00793497610092163, 0.007860223770141601, 0.007898111820220948, 0.007904255867004394, 0.007738368034362793, 0.00795244789123535, 0.007912415981292725, 0.0076861119270324706, 0.00797388792037964, 0.007954432010650634, 0.007786496162414551, 0.007844863891601562, 0.007825407981872558, 0.007705599784851074, 0.007705599784851074, 0.007717887878417969, 0.00774451208114624, 0.007718912124633789, 0.0076912641525268555, 0.00773632001876831, 0.007782400131225586, 0.007776256084442139, 0.007937024116516114, 0.007910399913787843, 0.007729152202606201, 0.00830668830871582, 0.00813158416748047, 0.007849984169006348, 0.007703551769256592, 0.007739391803741455, 0.007732223987579345, 0.008006655693054199, 0.007778304100036621, 0.00773632001876831, 0.0076984319686889645, 0.007703551769256592, 0.016625728607177735, 0.007673791885375977, 0.00773529577255249, 0.0076912641525268555, 0.007684095859527588, 0.007864319801330566, 0.007684095859527588, 0.007782400131225586, 0.008040448188781739, 0.007699456214904785, 0.007816192150115966, 0.00787660789489746, 0.007726079940795898, 0.0076912641525268555, 0.007709695816040039, 0.007703551769256592, 0.007727104187011719, 0.007713791847229004, 0.007734272003173828, 
0.007740416049957275, 0.007954432010650634, 0.008110079765319824, 0.008164352416992187, 0.008158207893371582, 0.008134655952453614, 0.008079360008239746, 0.008034303665161132, 0.008069120407104492, 0.008080384254455567, 0.008045568466186523, 0.008086527824401855, 0.008064000129699708, 0.008039423942565918, 0.008177663803100586, 0.008115232467651367, 0.008054752349853516, 0.008057855606079101, 0.00807423973083496, 0.008069120407104492, 0.00808243179321289, 0.008110079765319824, 0.008058879852294922, 0.008219648361206054, 0.008138751983642578, 0.008049663543701171, 0.008075263977050781, 0.008070143699645996, 0.008043519973754883, 0.008030207633972167, 0.008031231880187988, 0.008062975883483887, 0.008071167945861817, 0.008064000129699708, 0.008061951637268066, 0.00809881591796875, 0.008316927909851075, 0.008250368118286134, 0.00838963222503662, 0.008570879936218261, 0.008749055862426757, 0.008308735847473145, 0.008157183647155761, 0.0081080961227417]",tokens/s,126.06364233490866,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,togethercomputer/RedPajama-INCITE-Base-7B-v0.1,togethercomputer/RedPajama-INCITE-Base-7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, 
**kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 172, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-30b,huggyllama/llama-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 327, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,togethercomputer/RedPajama-INCITE-Base-3B-v1,togethercomputer/RedPajama-INCITE-Base-3B-v1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 172, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,mistralai/Mixtral-8x7B-v0.1,mistralai/Mixtral-8x7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,r,r,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/r/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a301f-1aea27b21afd0d8f4e84400d;08c2dbac-483f-4b9d-9e9f-68dc35158e2d) Repository Not Found for url: https://huggingface.co/r/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: r is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-12b,EleutherAI/pythia-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", 
line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 172, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-30b,facebook/opt-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 1117, in forward outputs = self.model.decoder( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 883, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 524, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 154, in forward query_states = self.q_proj(hidden_states) * self.scaling File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-564M,facebook/xglm-564M,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/xglm/modeling_xglm.py"", line 761, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/xglm/modeling_xglm.py"", line 647, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/xglm/modeling_xglm.py"", line 414, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/xglm/modeling_xglm.py"", line 244, in forward query_states = self.q_proj(hidden_states) * self.scaling File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,TencentARC/Mistral_Pro_8B_v0.1,TencentARC/Mistral_Pro_8B_v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 1139, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 1024, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 738, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 251, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-base-alpha-7b,stabilityai/stablelm-base-alpha-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 172, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-6B,01-ai/Yi-6B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1914, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2651, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1174, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 978, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 718, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 326, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-14B,Qwen/Qwen1.5-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 
2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 249, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm2-20b,internlm/internlm2-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/095ad5d22b0db76d20dbaefe7fa6ec7eb5da8b28/modeling_internlm2.py"", line 1204, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/095ad5d22b0db76d20dbaefe7fa6ec7eb5da8b28/modeling_internlm2.py"", line 1004, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/095ad5d22b0db76d20dbaefe7fa6ec7eb5da8b28/modeling_internlm2.py"", line 738, in forward hidden_states, self_attn_weights, present_key_value = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/095ad5d22b0db76d20dbaefe7fa6ec7eb5da8b28/modeling_internlm2.py"", line 308, in forward qkv_states = self.wqkv(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,.,.,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 370, in cached_file raise EnvironmentError( OSError: . does not appear to have a file named config.json. Checkout 'https://huggingface.co/./tree/None' for available files. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-j-6b,EleutherAI/gpt-j-6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File 
""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-7.5B,facebook/xglm-7.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is 
not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-7b,tiiuae/falcon-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/polyglot-ko-12.8b,EleutherAI/polyglot-ko-12.8b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciCoder-1b,Deci/DeciCoder-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 559, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3907, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 462, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,m,m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD 
EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/m/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2b7a-2755da975feb87e13e07c3da;f9075860-88f1-44d3-894f-79e423f0a46b) Repository Not Found for url: https://huggingface.co/m/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: m is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-1.8B,Qwen/Qwen1.5-1.8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights 
self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-MoE-A2.7B,Qwen/Qwen1.5-MoE-A2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File 
""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,v,v,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/v/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a339b-4e2944cd3cb64e3843501591;69760cc0-a4e3-4bd0-8cb8-2e91af980354) Repository Not Found for url: https://huggingface.co/v/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: v is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-110B,Qwen/Qwen1.5-110B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-2-12b,stabilityai/stablelm-2-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.3b,EleutherAI/pythia-1.3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-7b-hf,meta-llama/Llama-2-7b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciLM-7B,Deci/DeciLM-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 559, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3907, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 462, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-0.5B,Qwen/Qwen1.5-0.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,0,0,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/0/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a33f2-591134c028f4334029945cca;604754bc-c78c-43c3-ac52-fcff48b56cce) Repository Not Found for url: https://huggingface.co/0/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 0 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-70b-hf,meta-llama/Llama-2-70b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,2,2,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/2/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3299-4f1b58b964b9706b69f298a2;37e46097-7480-48db-9783-f2f235f2d575) Repository Not Found for url: https://huggingface.co/2/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 2 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-2.7b,facebook/opt-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,l,l,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/l/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a30e8-389f0e8918297f4041a2f3e7;ffc0239d-69c0-4830-b312-0b53b0e2dc3f) Repository Not Found for url: https://huggingface.co/l/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: l is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Meta-Llama-3-70B,meta-llama/Meta-Llama-3-70B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-32B,Qwen/Qwen1.5-32B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-72B,Qwen/Qwen2-beta-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,s,s,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/s/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2c20-0de236406a8f9f8e2bba9f65;e2af6918-1079-4393-84c3-8acc92217d04) Repository Not Found for url: https://huggingface.co/s/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: s is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,B,B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/B/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a32ef-67989712236c5f165a7174dd;bfb7058c-47ba-4280-8dec-b5160a933ae7) Repository Not Found for url: https://huggingface.co/B/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: B is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-14B,Qwen/Qwen2-beta-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,mistralai/Mistral-7B-v0.1,mistralai/Mistral-7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-16B-nl,Salesforce/codegen-16B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-2.7b,EleutherAI/pythia-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,a,a,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/a/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352,
in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3089-2ef0c7ec37e7761d6a7ec27e;69a961d1-bd6a-4faf-87e8-889741e8142d) Repository Not Found for url: https://huggingface.co/a/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: a is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-2.7B,EleutherAI/gpt-neo-2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File 
""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-40b,tiiuae/falcon-40b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in 
load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,1,1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/1/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return 
_hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a34a0-0702dee310f0f0fb180238e0;dc05193b-ef45-4abd-9d8d-d5c7534e91e1) Repository Not Found for url: https://huggingface.co/1/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 1 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-base-alpha-3b,stabilityai/stablelm-base-alpha-3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,t,t,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/t/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a2fcc-593e7c053ab4d4842d13e45c;dd31e934-d504-4c9b-9cd8-8dc01864910c) Repository Not Found for url: https://huggingface.co/t/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: t is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neox-20b,EleutherAI/gpt-neox-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,/,/,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File 
""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 370, in cached_file raise EnvironmentError( OSError: / does not appear to have a file named config.json. Checkout 'https://huggingface.co///tree/None' for available files. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-2b,google/recurrentgemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in 
_request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc391-6d00b3ab159c95792f93c738;828cc785-3d6f-4a60-8622-37a6eb67415f) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/recurrentgemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-72B,Qwen/Qwen1.5-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,-,-,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 106, in _inner_fn validate_repo_id(arg_value) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 160, in validate_repo_id raise HFValidationError( huggingface_hub.errors.HFValidationError: Repo id must use alphanumeric chars or '-', '_', '.', '--' and '..' are forbidden, '-' and '.' cannot start or end the name, max length is 96: '-'. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 463, in cached_file raise EnvironmentError( OSError: Incorrect path_or_model_id: '-'. Please provide either the path to a local folder or the repo_id of a model on the Hub. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-6B-nl,Salesforce/codegen-6B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-7b,google/recurrentgemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667cc3e7-1ae27ad36e8ab0c863fcf096;9c1ee3d9-31b9-4895-9056-d16a0e676cd7) Repository Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: google/recurrentgemma-7b is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-13b,huggyllama/llama-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) 
File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-2b,google/gemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc2e9-29d878d229165c4546c4bd4a;a60d3e33-c8ee-4f03-92f0-f5ed20d534dd) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/gemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-7B,Qwen/Qwen-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-13b-hf,meta-llama/Llama-2-13b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-4B,Qwen/Qwen1.5-4B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-410m,EleutherAI/pythia-410m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-7b,huggyllama/llama-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-125m,EleutherAI/gpt-neo-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,microsoft/phi-1_5,microsoft/phi-1_5,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-14B,Qwen/Qwen-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-65b,huggyllama/llama-65b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 86.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-7b,google/gemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc33b-29ce4e6865a5a5055b2fdda7;e9357461-2cfb-446f-9b50-23dd5f022080) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/gemma-7b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-7b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,i,i,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/i/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a2f1e-2867340e2f30044e285a5870;e00d7a61-7636-4882-8ca7-7f7006a7cb51) Repository Not Found for url: https://huggingface.co/i/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: i is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-6.7b,facebook/opt-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-34B,01-ai/Yi-34B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, 
in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 564, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3907, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 462, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-160m,EleutherAI/pythia-160m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File 
""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,M,M,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/M/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, 
proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2ec9-493b88d97d3439df3e5d7325;c7dd4131-22c5-438c-9cd8-9083553d7eab) Repository Not Found for url: https://huggingface.co/M/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: M is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,openai-community/gpt2-large,openai-community/gpt2-large,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.1,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1254.875136,2645.03296,0.0,1998.585856,1692.285952,s,10,0.24200246238708498,0.0242002462387085,0.000612125689531125,0.024116847991943358,0.02477244110107422,0.025168700790405275,0.025485708541870117,"[0.025564960479736328, 0.023542015075683594, 0.024684383392333985, 0.023764448165893556, 0.024505792617797853, 0.02447939109802246, 0.023851680755615234, 0.024382015228271485, 0.02353171157836914, 0.023696063995361328]",tokens/s,10578.40475980471,kWh,2.807038850191559e-07,1.537483499955637e-07,8.364053933254355e-07,1.270857628340155e-06,tokens/kWh,201438771.9687823,MB,1255.440384,2645.03296,0.0,1998.585856,1740.091904,s,10,13.98928271484375,1.398928271484375,0.009250422421391818,1.3987031860351564,1.4114088623046874,1.4117007690429688,1.4119342944335938,"[1.4011539306640626, 1.41199267578125, 1.4039649658203126, 1.3913382568359376, 1.411343994140625, 1.4078441162109374, 1.3920347900390626, 1.39625244140625, 1.3845965576171875, 1.388760986328125]",tokens/s,45.034474807741184,kWh,1.6551164256870412e-05,9.07002460188642e-06,3.349914985287281e-05,5.912033871162965e-05,tokens/kWh,1065623.1234955217,,s,629,14.182389726638794,0.022547519438217475,0.0029122915922720257,0.022378463745117188,0.022657024002075195,0.02286632957458496,0.04563361907958984,"[0.02266111946105957, 0.022467584609985353, 0.022542335510253905, 0.02246451187133789, 0.022589439392089843, 0.022599679946899414, 0.022360063552856444, 0.02248806381225586, 0.02263039970397949, 0.022633472442626954, 0.02250547218322754, 0.02248294448852539, 0.022863872528076173, 0.023409664154052736, 0.023657472610473632, 0.023362560272216795, 0.023547903060913086, 0.023163904190063478, 0.02222591972351074, 0.02307379150390625, 0.02348646354675293, 0.022845439910888672, 0.02269491195678711, 0.022561792373657227, 0.02246451187133789, 0.022376447677612304, 0.022429695129394533, 0.02270207977294922, 0.022359039306640623, 0.02206208038330078, 0.021753856658935547, 0.021375999450683594, 0.02146406364440918, 0.021550079345703126, 0.0216944637298584, 0.021514240264892577, 0.021566463470458985, 0.02152448081970215, 0.021552127838134767, 0.021534719467163087, 0.021593088150024413, 0.02186649513244629, 0.022435840606689454, 0.02308095932006836, 0.02249113655090332, 0.021839872360229492, 0.021519359588623048, 0.021755903244018555, 0.02161667251586914, 0.021600223541259764, 0.02187264060974121, 0.022023168563842774, 0.021592063903808592, 0.021515264511108398, 0.021596160888671875, 0.021569536209106444, 0.021582847595214845, 0.021592063903808592, 0.02168422317504883, 0.022158336639404298, 0.02163609504699707, 0.02167193603515625, 0.04547686386108398, 0.021605375289916993, 0.021596160888671875, 0.021605375289916993, 0.021635072708129883, 0.022408191680908202, 0.022987775802612305, 0.022617088317871094, 0.022552576065063477, 0.02248089599609375, 0.022358015060424806, 0.0216627197265625, 
0.02272768020629883, 0.022161407470703123, 0.021736448287963867, 0.021808128356933593, 0.021787647247314454, 0.021617664337158202, 0.022231039047241212, 0.02268876838684082, 0.022399999618530272, 0.022468608856201173, 0.02253004837036133, 0.02234982490539551, 0.022375423431396483, 0.022451200485229493, 0.02247270393371582, 0.022508544921875, 0.025584640502929686, 0.023030784606933592, 0.023574527740478517, 0.022610944747924806, 0.02266726493835449, 0.02264473533630371, 0.022552576065063477, 0.022425600051879883, 0.022433792114257813, 0.022609920501708985, 0.022460447311401368, 0.022352863311767578, 0.02246963119506836, 0.022428672790527345, 0.022574079513549804, 0.022376447677612304, 0.02248908805847168, 0.0224849910736084, 0.02279734420776367, 0.02256585693359375, 0.022495231628417968, 0.02246246337890625, 0.02231091117858887, 0.021749759674072267, 0.02169036865234375, 0.021618688583374023, 0.0221214714050293, 0.022444032669067384, 0.022716415405273437, 0.022600704193115235, 0.02253824043273926, 0.022766592025756836, 0.022545408248901368, 0.022372352600097657, 0.02264678382873535, 0.04748287963867188, 0.02241535949707031, 0.02246553611755371, 0.022574079513549804, 0.022425600051879883, 0.02245529556274414, 0.02246963119506836, 0.022468608856201173, 0.022520832061767578, 0.0224901123046875, 0.022559743881225586, 0.022330368041992187, 0.022426624298095704, 0.0227194881439209, 0.022578176498413087, 0.02250649642944336, 0.02247372817993164, 0.022339584350585938, 0.022377504348754882, 0.022376415252685546, 0.022382591247558595, 0.022501375198364256, 0.02234060859680176, 0.022625280380249024, 0.022508544921875, 0.022467584609985353, 0.022540288925170897, 0.022409215927124023, 0.022548479080200197, 0.022412288665771486, 0.022406143188476564, 0.02249830436706543, 0.022558719635009765, 0.02249318313598633, 0.022424575805664062, 0.022597631454467772, 0.022608896255493165, 0.022436864852905275, 0.02167091178894043, 0.02172211265563965, 0.02166374397277832, 0.021729280471801758, 0.02165247917175293, 0.021629951477050782, 0.02152448081970215, 0.021518335342407227, 0.021576704025268553, 0.022010879516601564, 0.02165657615661621, 0.02165247917175293, 0.021729280471801758, 0.021611520767211914, 0.02169139289855957, 0.02255564880371094, 0.02243891143798828, 0.022483968734741212, 0.0224849910736084, 0.022394880294799805, 0.02249932861328125, 0.022280191421508787, 0.022421503067016603, 0.022451200485229493, 0.02246143913269043, 0.045770751953125, 0.021746688842773438, 0.021728256225585937, 0.021703680038452147, 0.021639167785644533, 0.021617664337158202, 0.02170163154602051, 0.02166067123413086, 0.021626880645751953, 0.02168422317504883, 0.02163711929321289, 0.021738496780395508, 0.02183475112915039, 0.021724159240722657, 0.021720064163208007, 0.021741567611694337, 0.02190540885925293, 0.022610944747924806, 0.02248089599609375, 0.021700607299804688, 0.02162073516845703, 0.02164735984802246, 0.02163609504699707, 0.02162483215332031, 0.021567487716674806, 0.02170675277709961, 0.021619712829589844, 0.021646335601806642, 0.021635072708129883, 0.02166988754272461, 0.021695487976074217, 0.021622783660888673, 0.02164121627807617, 0.02167398452758789, 0.02165043258666992, 0.02211123275756836, 0.023779327392578126, 0.022837247848510742, 0.022750207901000977, 0.022556671142578123, 0.022378496170043945, 0.022426624298095704, 0.022381568908691408, 0.02231500816345215, 0.02289254379272461, 0.022403072357177735, 0.022542335510253905, 0.022353919982910156, 0.022362112045288086, 0.022404096603393556, 0.022500352859497072, 
0.02253107261657715, 0.02250444793701172, 0.022428672790527345, 0.022418432235717774, 0.02233241653442383, 0.022389759063720704, 0.022434816360473633, 0.022420480728149415, 0.022503423690795898, 0.02242355155944824, 0.022587392807006838, 0.02253004837036133, 0.04750950241088867, 0.02255462455749512, 0.02245529556274414, 0.02269388771057129, 0.022742015838623047, 0.02252390480041504, 0.022503423690795898, 0.02253107261657715, 0.022559743881225586, 0.022559743881225586, 0.02245734405517578, 0.02246963119506836, 0.022397951126098634, 0.02254643249511719, 0.022470687866210936, 0.022647775650024415, 0.02243891143798828, 0.02228326416015625, 0.022187007904052734, 0.02170982360839844, 0.02269388771057129, 0.022635520935058592, 0.022399007797241213, 0.022482912063598634, 0.022408191680908202, 0.022483968734741212, 0.0225167350769043, 0.02253004837036133, 0.02247372817993164, 0.0224849910736084, 0.02231808090209961, 0.0224532470703125, 0.02246246337890625, 0.02240716743469238, 0.022417407989501953, 0.022451200485229493, 0.02234163284301758, 0.022402048110961914, 0.022230016708374024, 0.022042623519897463, 0.0220579833984375, 0.022452255249023438, 0.022406112670898436, 0.022346752166748047, 0.022376447677612304, 0.02267852783203125, 0.02265088081359863, 0.0224768009185791, 0.022940671920776368, 0.022603776931762694, 0.0219238395690918, 0.021757951736450197, 0.0216944637298584, 0.02168217658996582, 0.021613567352294923, 0.022289407730102538, 0.02246963119506836, 0.02243174362182617, 0.022377471923828125, 0.0224399356842041, 0.022323200225830078, 0.02244915199279785, 0.022559743881225586, 0.047440895080566405, 0.022466560363769532, 0.022579200744628908, 0.022544384002685547, 0.022822912216186524, 0.023026687622070312, 0.022591487884521484, 0.02247372817993164, 0.022510623931884764, 0.022584287643432618, 0.022615039825439453, 0.02259660720825195, 0.022487039566040038, 0.022495231628417968, 0.022602752685546876, 0.0225218563079834, 0.02249728012084961, 0.022600704193115235, 0.02282700729370117, 0.022567935943603516, 0.02230271911621094, 0.02247270393371582, 0.022608896255493165, 0.022347776412963868, 0.022796287536621093, 0.02242767906188965, 0.02233852767944336, 0.022391807556152343, 0.022312959671020507, 0.02224844741821289, 0.02244710350036621, 0.022391807556152343, 0.02162892723083496, 0.022434816360473633, 0.022419456481933595, 0.023061504364013673, 0.023794687271118165, 0.02291814422607422, 0.02275328063964844, 0.0224737606048584, 0.022846431732177735, 0.022739967346191405, 0.0224399356842041, 0.022252544403076172, 0.02245529556274414, 0.022370304107666016, 0.022350847244262697, 0.022475776672363282, 0.021917695999145507, 0.021703680038452147, 0.0216494083404541, 0.021736448287963867, 0.021687295913696288, 0.02165452766418457, 0.02166374397277832, 0.02164735984802246, 0.021622783660888673, 0.021659648895263672, 0.02165350341796875, 0.021634048461914062, 0.021605375289916993, 0.021601280212402343, 0.021582847595214845, 0.04577996826171875, 0.021742591857910155, 0.02168012809753418, 0.022021120071411132, 0.02241535949707031, 0.022448160171508788, 0.022464479446411133, 0.022429695129394533, 0.02190540885925293, 0.02169139289855957, 0.02166169548034668, 0.021618688583374023, 0.02166988754272461, 0.021708799362182618, 0.02167705535888672, 0.021703680038452147, 0.02170675277709961, 0.021603328704833984, 0.022540288925170897, 0.022359039306640623, 0.022375455856323244, 0.022378463745117188, 0.022406143188476564, 0.02274508857727051, 0.022146047592163084, 0.021757951736450197, 0.022359039306640623, 
0.022399999618530272, 0.022748159408569335, 0.022377471923828125, 0.022382591247558595, 0.022356000900268555, 0.022352863311767578, 0.022393856048583984, 0.021725183486938478, 0.021651456832885742, 0.021613567352294923, 0.021739519119262696, 0.021606399536132814, 0.021602304458618164, 0.021587968826293946, 0.021557247161865235, 0.021554176330566405, 0.021601280212402343, 0.021581823348999024, 0.021748735427856446, 0.02166476821899414, 0.021607423782348634, 0.021644287109375, 0.022733823776245117, 0.02239897537231445, 0.022427648544311524, 0.02243174362182617, 0.022390783309936522, 0.02250444793701172, 0.02248908805847168, 0.022656000137329102, 0.02255462455749512, 0.02251571273803711, 0.02247372817993164, 0.02249932861328125, 0.022417407989501953, 0.02246143913269043, 0.04679884719848633, 0.021774335861206053, 0.0216944637298584, 0.022240255355834963, 0.022406143188476564, 0.02237238311767578, 0.022376415252685546, 0.022520832061767578, 0.022566911697387695, 0.022358015060424806, 0.02244915199279785, 0.02204876708984375, 0.02246348762512207, 0.02243071937561035, 0.022529024124145508, 0.02246553611755371, 0.022758399963378906, 0.022568960189819336, 0.02242355155944824, 0.02242252731323242, 0.022395904541015626, 0.022386688232421875, 0.022372352600097657, 0.022389759063720704, 0.02205695915222168, 0.02208563232421875, 0.02240716743469238, 0.022379520416259766, 0.02263039970397949, 0.02286796760559082, 0.022436864852905275, 0.022495231628417968, 0.02264473533630371, 0.0224768009185791, 0.02244915199279785, 0.022459392547607423, 0.02164735984802246, 0.021584896087646483, 0.02171494483947754, 0.02163199996948242, 0.021613567352294923, 0.02165452766418457, 0.021562368392944335, 0.02166374397277832, 0.021953535079956055, 0.022040576934814454, 0.021609472274780273, 0.021794815063476563, 0.021734432220458986, 0.022769632339477538, 0.02245631980895996, 0.021619712829589844, 0.022328319549560546, 0.02252390480041504, 0.02247987174987793, 0.02167296028137207, 0.02163711929321289, 0.021740543365478517, 0.021622783660888673, 0.022252544403076172, 0.021740543365478517, 0.021704704284667968, 0.021771263122558594, 0.04554956817626953, 0.021817344665527344, 0.021734399795532225, 0.021558271408081055, 0.021544960021972655, 0.021627904891967774, 0.021627904891967774, 0.02165555191040039, 0.021766143798828123, 0.021760000228881835, 0.021741567611694337, 0.02191974449157715, 0.02168627166748047, 0.021700607299804688, 0.021734399795532225, 0.021758975982666014, 0.021625856399536132, 0.02186444854736328, 0.021780479431152345, 0.02165247917175293, 0.02168217658996582, 0.021432319641113282, 0.02170675277709961, 0.021590015411376954, 0.021515264511108398, 0.021569536209106444, 0.021545984268188476, 0.021519359588623048, 0.021651456832885742, 0.021586944580078125, 0.022597631454467772, 0.023916543960571288, 0.022725664138793945, 0.022460384368896483, 0.022580223083496095, 0.022452224731445314, 0.02228121566772461, 0.02166886329650879, 0.021551103591918946, 0.021612543106079102, 0.02163711929321289, 0.021565439224243164, 0.021635072708129883, 0.021566463470458985, 0.022394912719726563, 0.022438880920410156, 0.022375423431396483, 0.022433792114257813, 0.022394880294799805, 0.022401023864746093, 0.02226380729675293, 0.02230886459350586, 0.02188697624206543, 0.021750783920288085, 0.02182963180541992, 0.023613439559936524, 0.02305843162536621, 0.022502399444580077, 0.022609920501708985, 0.0224768009185791, 0.02185932731628418, 0.02165452766418457, 0.021748735427856446, 0.045666305541992185, 0.0216494083404541, 
0.0216944637298584, 0.02168217658996582, 0.02166886329650879, 0.021585920333862304, 0.02225971221923828, 0.022445056915283205, 0.022372352600097657, 0.02244812774658203, 0.02245631980895996, 0.022335487365722655, 0.022558719635009765, 0.022304767608642577, 0.0224532470703125, 0.0224849910736084, 0.02243071937561035, 0.021595136642456055, 0.021551103591918946, 0.0217262077331543, 0.021611520767211914, 0.021566463470458985, 0.021792768478393554, 0.021699583053588867, 0.021734399795532225, 0.02164838409423828, 0.02206515121459961, 0.02191564750671387, 0.021738496780395508, 0.02163609504699707, 0.02168524742126465, 0.021646335601806642, 0.021615615844726564, 0.02164019203186035, 0.02167705535888672, 0.021550079345703126, 0.02182963180541992, 0.02245529556274414, 0.02269388771057129, 0.0224901123046875, 0.022406143188476564, 0.022425600051879883, 0.02234880065917969, 0.022487039566040038, 0.022495231628417968, 0.022410240173339844, 0.022487039566040038, 0.022525951385498046, 0.02243174362182617, 0.022336511611938475, 0.02234060859680176, 0.022337535858154296, 0.02232524871826172, 0.022363136291503907, 0.021784576416015625, 0.021576704025268553, 0.0216760311126709, 0.02170675277709961, 0.021560319900512694, 0.0216627197265625, 0.02201907157897949, 0.02225868797302246, 0.022460416793823244]",tokens/s,44.35077671138517,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-70m,EleutherAI/pythia-70m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File 
""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm-20b,internlm/internlm-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File 
""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Meta-Llama-3-8B,meta-llama/Meta-Llama-3-8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: 
name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-350m,facebook/opt-350m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
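One row in this slice (openai-community/gpt2-large on torch 2.3.0+cu121) did complete and carries full latency statistics. Its prefill throughput of ~10578 tokens/s is consistent with the 256-token sequence length divided by the ~24.2 ms mean prefill latency, and its decode throughput of ~45.03 tokens/s is consistent with 63 decode steps (max_new_tokens=64, with the first token attributed to prefill) over the ~1.399 s mean decode latency. A sketch recomputing the summary columns from that row's raw per-iteration samples; the percentile convention (linear interpolation over sorted samples, numpy's default) is an assumption, but it reproduces the reported p50/p90/p95/p99 exactly:

```python
# Recompute the decode-latency statistics of the gpt2-large row above from its
# raw per-iteration samples (seconds, copied from the CSV).
import math
import statistics

decode_latencies_s = [
    1.4011539306640626, 1.41199267578125, 1.4039649658203126,
    1.3913382568359376, 1.411343994140625, 1.4078441162109374,
    1.3920347900390626, 1.39625244140625, 1.3845965576171875,
    1.388760986328125,
]

def percentile(samples: list[float], p: float) -> float:
    """Linear-interpolation percentile over sorted samples (one common convention)."""
    s = sorted(samples)
    k = (len(s) - 1) * p / 100.0
    lo, hi = math.floor(k), math.ceil(k)
    return s[lo] + (s[hi] - s[lo]) * (k - lo)

mean = statistics.mean(decode_latencies_s)    # ~1.398928 s, as reported
stdev = statistics.stdev(decode_latencies_s)  # ~0.009250 s, as reported
p50, p90, p95, p99 = (percentile(decode_latencies_s, p) for p in (50, 90, 95, 99))

# 63 generated tokens per decode phase divided by the mean decode latency
# reproduces the reported ~45.03 tokens/s.
print(f"mean={mean:.6f}s stdev={stdev:.6f}s")
print(f"p50={p50:.6f}s p90={p90:.6f}s p95={p95:.6f}s p99={p99:.6f}s")
print(f"decode throughput ≈ {63 / mean:.2f} tokens/s")
```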
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-rw-1b,tiiuae/falcon-rw-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-125m,facebook/opt-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
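Most failing rows in this slice share the same root cause: `NameError: name 'exl_ext' is not defined` inside autoawq's exllama linear module. The traceback implies the compiled ExLlama kernel extension is imported under a try/except (otherwise importing the module would already raise ImportError), so a missing kernel wheel only surfaces later, when `post_init()` touches the unbound name. A minimal sketch of that pattern, assumed rather than copied from autoawq's source; the class name and layout here are illustrative:

```python
# Sketch of the guarded-import pattern behind the NameError in these rows:
# module import succeeds without the kernels, and the missing name only
# surfaces when post_init() runs during AWQ post-processing.
import importlib.util

try:
    import exl_ext  # compiled ExLlama v1 kernels, shipped separately from autoawq
except ImportError:
    pass  # swallowed: exl_ext is simply never bound in this namespace

class WQLinearExllama:  # illustrative stand-in for autoawq's exllama linear layer
    def post_init(self):
        # NameError here when the kernels are absent, exactly as in the rows above.
        self.q4 = exl_ext.make_q4()

# A pre-flight check one could run before selecting
# quantization_config={"bits": 4, "version": "exllama"}:
if importlib.util.find_spec("exl_ext") is None:
    print("exl_ext kernels not importable; pick a different AWQ kernel version")
```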
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-4.5B,facebook/xglm-4.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,x,x,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, 
AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/x/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a31ed-547746943afaf7af5bf08b70;ef9a9642-cc44-4418-b2d2-b572ea782084) Repository Not Found for url: https://huggingface.co/x/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: x is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-3b-4e1t,stabilityai/stablelm-3b-4e1t,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights 
self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-2-1_6b,stabilityai/stablelm-2-1_6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File 
""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-66b,facebook/opt-66b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 162.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-180B,tiiuae/falcon-180B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/tiiuae/falcon-180B/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, 
response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-6677947a-7c9f2dcb21c40e9151ff915b;f6a6f743-9366-479a-9dbb-d0bdbf215704) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/tiiuae/falcon-180B/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like tiiuae/falcon-180B is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-1.3B,EleutherAI/gpt-neo-1.3B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-6.7b,EleutherAI/pythia-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-7B,Qwen/Qwen1.5-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.4b,EleutherAI/pythia-1.4b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-13b,facebook/opt-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,8,8,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD 
EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/8/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3196-3693f5993f9856e47cb8d30c;09398bdc-62bb-405d-8239-eb4263b73c73) Repository Not Found for url: https://huggingface.co/8/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 8 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-72B,Qwen/Qwen-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File 
""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,databricks/dbrx-base,databricks/dbrx-base,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper 
hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cbe4e-184ed3001554733059ef9cbc;d6143432-f2f4-4051-9328-e02ff305f4f8) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like databricks/dbrx-base is not the path to a directory containing a file named config.json. 
Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,microsoft/rho-math-1b-v0.1,microsoft/rho-math-1b-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,openai-community/gpt2,openai-community/gpt2,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1231.843328,1005.060096,0.0,358.612992,318.913024,s,21,0.18355030345916748,0.008740490640912738,0.0003350787955471485,0.00862604808807373,0.008949695587158203,0.00923475170135498,0.009869042778015138,"[0.010027615547180176, 0.008949695587158203, 0.008625503540039062, 0.00923475170135498, 0.008610015869140624, 0.00866915225982666, 0.00862604808807373, 0.008871392250061035, 0.008561504364013673, 0.008556192398071288, 0.008633919715881348, 0.00886348819732666, 0.008537247657775878, 0.008714079856872559, 0.008701151847839355, 0.008674336433410644, 0.008554752349853516, 0.008511584281921386, 0.00850607967376709, 0.008580191612243653, 0.008541600227355957]",tokens/s,29288.973642018205,kWh,1.0310658924635563e-07,5.648848538782815e-08,2.2294035675046953e-07,3.825354313846533e-07,tokens/kWh,669219055.2738177,MB,1232.13824,1005.060096,0.0,358.612992,328.809472,s,21,10.26999664306641,0.48904745919363846,0.00403887718511448,0.487286376953125,0.4949382019042969,0.4976250915527344,0.4998394714355469,"[0.50039306640625, 0.4949382019042969, 0.491740478515625, 0.4892876586914062, 0.4909471740722656, 0.4976250915527344, 0.487286376953125, 0.48865457153320313, 0.48538818359375, 0.48481610107421874, 0.48979806518554686, 0.48635006713867185, 0.48722393798828123, 0.485962646484375, 0.4894192504882813, 0.48873077392578124, 0.4857748413085938, 0.4860442810058594, 0.48649954223632813, 0.4870877685546875, 0.486028564453125]",tokens/s,128.821853208024,kWh,5.764060866563101e-06,3.1584372687529265e-06,9.864422210484918e-06,1.878692034580095e-05,tokens/kWh,3353396.8761453275,,s,1322,10.43460300683975,0.007893043121663959,0.0011110576671242386,0.007707647800445557,0.007921459436416627,0.008081356382369995,0.016560291538238524,"[0.009092096328735352, 0.009058367729187012, 0.008918975830078125, 0.008963071823120117, 0.008934399604797364, 0.008929280281066895, 0.00934502410888672, 0.008104960441589355, 0.008223744392395019, 0.007952415943145752, 0.007911392211914062, 0.007724063873291016, 0.007740384101867676, 0.007718912124633789, 0.007732223987579345, 0.007771135807037354, 0.007724031925201416, 0.007733248233795166, 0.007701568126678467, 0.00769324779510498, 0.007707647800445557, 0.007727104187011719, 0.007730175971984863, 0.007672832012176513, 0.007747583866119385, 0.007762944221496582, 0.007890944004058837, 0.007702527999877929, 0.007723008155822754, 0.007799808025360107, 0.007707647800445557, 0.007696383953094482, 0.007733248233795166, 0.0076902399063110355, 0.007792640209197998, 0.007727104187011719, 0.007721983909606934, 0.007738368034362793, 0.007664639949798584, 0.0076902399063110355, 0.007693312168121338, 0.0076902399063110355, 0.0076912641525268555, 0.007764992237091065, 0.007812096118927002, 0.0076984319686889645, 0.007737343788146973, 0.007910399913787843, 0.007769087791442871, 0.007794688224792481, 0.007959551811218261, 0.007833600044250488, 
0.007940095901489258, 0.007715839862823487, 0.007907328128814697, 0.007768064022064209, 0.0076984319686889645, 0.007676928043365478, 0.007727104187011719, 0.007711743831634522, 0.00790937614440918, 0.007811071872711181, 0.01726470375061035, 0.007868351936340333, 0.007919616222381591, 0.008102911949157715, 0.008324095726013184, 0.008201215744018555, 0.008301568031311036, 0.008308735847473145, 0.008286208152770995, 0.00830463981628418, 0.008102911949157715, 0.00808140754699707, 0.007987199783325195, 0.007973919868469239, 0.007829472064971924, 0.00774348783493042, 0.007905280113220215, 0.007950335979461669, 0.008045568466186523, 0.007724031925201416, 0.00809779167175293, 0.007895040035247802, 0.007924736022949219, 0.007701504230499268, 0.007714816093444824, 0.007802879810333252, 0.007689216136932373, 0.007731200218200684, 0.0076902399063110355, 0.007704576015472412, 0.0077209601402282715, 0.0077036161422729494, 0.00770963191986084, 0.00785100793838501, 0.007706624031066894, 0.00785920000076294, 0.007693312168121338, 0.00774348783493042, 0.007714816093444824, 0.007696383953094482, 0.007706624031066894, 0.007671807765960693, 0.008090656280517578, 0.00802505588531494, 0.007776256084442139, 0.007709695816040039, 0.007713791847229004, 0.007703551769256592, 0.00783462381362915, 0.007703551769256592, 0.0076984319686889645, 0.007723008155822754, 0.007885824203491211, 0.007710720062255859, 0.007711743831634522, 0.007705599784851074, 0.007715839862823487, 0.007755775928497314, 0.007683072090148926, 0.007688191890716553, 0.007709695816040039, 0.007748608112335205, 0.007685120105743408, 0.01663488006591797, 0.007726079940795898, 0.007717887878417969, 0.0077281279563903805, 0.007688191890716553, 0.007729152202606201, 0.0076943359375, 0.007702527999877929, 0.007713888168334961, 0.007737247943878174, 0.0077209601402282715, 0.007712768077850342, 0.0077608962059021, 0.007717887878417969, 0.007710720062255859, 0.008239104270935058, 0.008116224288940429, 0.008475647926330567, 0.008458239555358887, 0.008246272087097169, 0.007942143917083741, 0.00795961618423462, 0.007945151805877685, 0.007906303882598878, 0.007910399913787843, 0.007938047885894776, 0.007723008155822754, 0.007715839862823487, 0.0076902399063110355, 0.0076902399063110355, 0.0076902399063110355, 0.007681024074554443, 0.007697408199310303, 0.007683104038238526, 0.0076943359375, 0.0077506241798400875, 0.00775270414352417, 0.007787519931793213, 0.007673855781555176, 0.007689216136932373, 0.007712768077850342, 0.007634943962097168, 0.007689216136932373, 0.0076902399063110355, 0.0076912641525268555, 0.007678976058959961, 0.0076902399063110355, 0.007709695816040039, 0.007890944004058837, 0.007795711994171142, 0.007898111820220948, 0.007674880027770996, 0.007759871959686279, 0.007707647800445557, 0.007746560096740723, 0.007889920234680176, 0.007853055953979492, 0.007882751941680909, 0.007867392063140868, 0.0077547521591186525, 0.007782400131225586, 0.007676928043365478, 0.007889920234680176, 0.01763020706176758, 0.008062975883483887, 0.007933951854705811, 0.00788479995727539, 0.007878655910491944, 0.007974912166595459, 0.007945216178894043, 0.007906303882598878, 0.007907328128814697, 0.007918591976165772, 0.007847936153411865, 0.007732223987579345, 0.00775270414352417, 0.007715839862823487, 0.007693312168121338, 0.007790592193603516, 0.007701504230499268, 0.00767084789276123, 0.007698368072509765, 0.007670783996582031, 0.007679999828338623, 0.007670783996582031, 0.007672832012176513, 0.0076871681213378906, 0.007737343788146973, 0.007924736022949219, 
0.007703551769256592, 0.007711743831634522, 0.007705599784851074, 0.007706624031066894, 0.007683072090148926, 0.007726079940795898, 0.007684095859527588, 0.007717887878417969, 0.007829504013061523, 0.007885824203491211, 0.007727104187011719, 0.007808000087738037, 0.007792640209197998, 0.007704576015472412, 0.007664639949798584, 0.007731200218200684, 0.007749631881713868, 0.007715839862823487, 0.007766016006469726, 0.007790592193603516, 0.007709695816040039, 0.007795711994171142, 0.007740416049957275, 0.007689216136932373, 0.007677951812744141, 0.007688191890716553, 0.007706624031066894, 0.007693312168121338, 0.007697440147399902, 0.007667679786682129, 0.0076912641525268555, 0.007759871959686279, 0.007836671829223632, 0.007707647800445557, 0.007697408199310303, 0.007692287921905518, 0.007705599784851074, 0.01658367919921875, 0.007725056171417236, 0.007701568126678467, 0.007698368072509765, 0.007697408199310303, 0.007677984237670898, 0.007667744159698486, 0.00769324779510498, 0.007798816204071045, 0.007762944221496582, 0.00780079984664917, 0.008656895637512207, 0.00830361557006836, 0.008055839538574219, 0.008196063995361328, 0.007998464107513427, 0.00795136022567749, 0.008061951637268066, 0.007879712104797364, 0.00796668815612793, 0.007711808204650879, 0.0077086081504821775, 0.007677951812744141, 0.007714816093444824, 0.007709695816040039, 0.007789567947387695, 0.007707647800445557, 0.0076912641525268555, 0.007716864109039307, 0.007702527999877929, 0.007709695816040039, 0.007679999828338623, 0.007709695816040039, 0.007703551769256592, 0.007715839862823487, 0.007775231838226319, 0.007758848190307617, 0.007897088050842285, 0.007768064022064209, 0.007769087791442871, 0.007915520191192627, 0.007915520191192627, 0.007895040035247802, 0.0076943359375, 0.007679999828338623, 0.007676928043365478, 0.00769536018371582, 0.007711743831634522, 0.007781375885009765, 0.007708735942840576, 0.00771065616607666, 0.0076984319686889645, 0.007763967990875244, 0.007647232055664062, 0.007775231838226319, 0.007683072090148926, 0.007717887878417969, 0.007763967990875244, 0.0076943359375, 0.007715839862823487, 0.007766016006469726, 0.007777279853820801, 0.007678976058959961, 0.016675840377807616, 0.007716864109039307, 0.0076984319686889645, 0.007701504230499268, 0.00780185604095459, 0.007713791847229004, 0.007725056171417236, 0.007714816093444824, 0.007688191890716553, 0.008960000038146973, 0.008425472259521484, 0.00793497610092163, 0.007820288181304931, 0.007699456214904785, 0.007699456214904785, 0.007682047843933106, 0.007803904056549072, 0.007661568164825439, 0.007709695816040039, 0.007979008197784423, 0.007948287963867188, 0.008050687789916992, 0.007895040035247802, 0.007890944004058837, 0.007904255867004394, 0.00800051212310791, 0.007781375885009765, 0.007700479984283447, 0.0076902399063110355, 0.0076902399063110355, 0.007723008155822754, 0.007730175971984863, 0.007696383953094482, 0.007681024074554443, 0.008360960006713868, 0.009117695808410644, 0.009527296066284179, 0.00808243179321289, 0.00799955177307129, 0.00785094404220581, 0.007670783996582031, 0.007714816093444824, 0.007681024074554443, 0.007689216136932373, 0.007984127998352051, 0.007931903839111328, 0.007922688007354736, 0.00800870418548584, 0.007964672088623047, 0.008010751724243164, 0.007980031967163086, 0.00773529577255249, 0.007676928043365478, 0.0076871681213378906, 0.00769536018371582, 0.007947264194488525, 0.008145919799804687, 0.007927807807922363, 0.007897088050842285, 0.007776256084442139, 0.007714816093444824, 0.007714816093444824, 
0.007712768077850342, 0.016688127517700196, 0.007721983909606934, 0.007673855781555176, 0.00773529577255249, 0.008170495986938477, 0.008371199607849121, 0.0077209601402282715, 0.0077281599044799806, 0.007708640098571778, 0.007672832012176513, 0.0076943359375, 0.0076943359375, 0.007738368034362793, 0.00773529577255249, 0.007715839862823487, 0.007730175971984863, 0.0076871681213378906, 0.007707647800445557, 0.007707647800445557, 0.007686143875122071, 0.007689216136932373, 0.0076984319686889645, 0.007703551769256592, 0.007768064022064209, 0.00774348783493042, 0.007772160053253174, 0.0076871681213378906, 0.007663616180419922, 0.007666687965393066, 0.007672832012176513, 0.0076656961441040035, 0.007675871849060058, 0.0076871681213378906, 0.00764518404006958, 0.007671807765960693, 0.007745535850524903, 0.007677951812744141, 0.0076902399063110355, 0.007734272003173828, 0.007685120105743408, 0.0076943359375, 0.007675903797149658, 0.007677951812744141, 0.007675903797149658, 0.007730175971984863, 0.007740416049957275, 0.007658495903015137, 0.007704576015472412, 0.007727104187011719, 0.007727104187011719, 0.007701504230499268, 0.007684095859527588, 0.0077578239440917966, 0.007705599784851074, 0.007681024074554443, 0.007710720062255859, 0.007685184001922607, 0.007728064060211181, 0.007676959991455078, 0.007675871849060058, 0.007856128215789794, 0.00790118408203125, 0.0079267840385437, 0.017124351501464845, 0.007890944004058837, 0.007896063804626464, 0.007844863891601562, 0.007715839862823487, 0.007679999828338623, 0.007675903797149658, 0.0076912641525268555, 0.007697408199310303, 0.007703551769256592, 0.007701504230499268, 0.007693312168121338, 0.00774348783493042, 0.0076984319686889645, 0.007705599784851074, 0.007809023857116699, 0.007742464065551758, 0.007864319801330566, 0.007906303882598878, 0.007886847972869874, 0.007703551769256592, 0.007671807765960693, 0.007692287921905518, 0.007684095859527588, 0.007683072090148926, 0.007726079940795898, 0.007716864109039307, 0.007705599784851074, 0.007701504230499268, 0.007738368034362793, 0.007684095859527588, 0.007758848190307617, 0.007689216136932373, 0.007742464065551758, 0.007646207809448242, 0.007659520149230957, 0.007811071872711181, 0.007968768119812012, 0.007933951854705811, 0.007763967990875244, 0.007693312168121338, 0.0076943359375, 0.007675903797149658, 0.007677951812744141, 0.007677951812744141, 0.007675936222076416, 0.007691232204437256, 0.0076687359809875484, 0.007788544178009033, 0.007912447929382324, 0.0077209601402282715, 0.007674880027770996, 0.007666687965393066, 0.007675903797149658, 0.007684095859527588, 0.007665664196014404, 0.007678976058959961, 0.008128512382507324, 0.008069120407104492, 0.007811071872711181, 0.007938047885894776, 0.007679999828338623, 0.007688223838806152, 0.016586719512939455, 0.007676928043365478, 0.007712768077850342, 0.007682047843933106, 0.007674880027770996, 0.007696383953094482, 0.007659520149230957, 0.007680031776428222, 0.007677919864654541, 0.0076912641525268555, 0.0076912641525268555, 0.007704576015472412, 0.007708672046661377, 0.007681024074554443, 0.007847936153411865, 0.007811071872711181, 0.00773632001876831, 0.007725056171417236, 0.0076574721336364745, 0.0076769919395446775, 0.007656383991241455, 0.007673855781555176, 0.007708672046661377, 0.00769536018371582, 0.007670783996582031, 0.007692287921905518, 0.007771135807037354, 0.007670783996582031, 0.007665664196014404, 0.007670783996582031, 0.007733248233795166, 0.007681024074554443, 0.007705599784851074, 0.007664639949798584, 0.0076605439186096195, 
0.007678976058959961, 0.007718912124633789, 0.007679999828338623, 0.007672832012176513, 0.007723008155822754, 0.007666687965393066, 0.007671807765960693, 0.007719999790191651, 0.007715775966644287, 0.007756832122802734, 0.007769055843353271, 0.007670783996582031, 0.007681024074554443, 0.0076871681213378906, 0.007742464065551758, 0.007697408199310303, 0.007704576015472412, 0.007715839862823487, 0.0077199358940124516, 0.007715839862823487, 0.007699456214904785, 0.007782400131225586, 0.007672832012176513, 0.007705599784851074, 0.007671807765960693, 0.007681024074554443, 0.007676928043365478, 0.007688191890716553, 0.016537599563598633, 0.007702527999877929, 0.0076912641525268555, 0.00780185604095459, 0.007649280071258545, 0.0077199358940124516, 0.0076605439186096195, 0.007711743831634522, 0.0076360001564025875, 0.0076789441108703616, 0.007679999828338623, 0.007708672046661377, 0.007693376064300537, 0.0076594557762145994, 0.007683072090148926, 0.007707647800445557, 0.007715839862823487, 0.007693312168121338, 0.007656447887420655, 0.007662591934204102, 0.007688191890716553, 0.007667712211608887, 0.007652383804321289, 0.007774176120758057, 0.007679999828338623, 0.0076277761459350585, 0.0077199358940124516, 0.007705599784851074, 0.007661568164825439, 0.007655424118041992, 0.00765337610244751, 0.007646207809448242, 0.007651328086853027, 0.00765337610244751, 0.007684095859527588, 0.007672832012176513, 0.00765235185623169, 0.007797760009765625, 0.0076574721336364745, 0.007676928043365478, 0.007676928043365478, 0.0076605439186096195, 0.007675936222076416, 0.007666656017303467, 0.007671807765960693, 0.007673855781555176, 0.007711743831634522, 0.007681024074554443, 0.0076605439186096195, 0.007897088050842285, 0.007715839862823487, 0.007639039993286132, 0.007663616180419922, 0.007663616180419922, 0.007703551769256592, 0.007701504230499268, 0.007685120105743408, 0.007809023857116699, 0.007738368034362793, 0.007857151985168457, 0.007700479984283447, 0.007667744159698486, 0.007658463954925537, 0.01657142448425293, 0.007717855930328369, 0.007683072090148926, 0.007674880027770996, 0.007670783996582031, 0.007671807765960693, 0.0076984319686889645, 0.007650303840637207, 0.007666687965393066, 0.007692287921905518, 0.007678976058959961, 0.00769536018371582, 0.007678976058959961, 0.007858176231384278, 0.0076912641525268555, 0.007671807765960693, 0.007683072090148926, 0.007661568164825439, 0.007699456214904785, 0.00769536018371582, 0.007699456214904785, 0.007732223987579345, 0.0076902399063110355, 0.007854080200195313, 0.007869440078735352, 0.007822336196899414, 0.007726079940795898, 0.008191007614135742, 0.008648672103881837, 0.008141823768615723, 0.007966720104217529, 0.007977983951568603, 0.008117247581481933, 0.007708672046661377, 0.007697408199310303, 0.007654399871826172, 0.007683072090148926, 0.007730175971984863, 0.007701504230499268, 0.008463359832763672, 0.008068096160888672, 0.007916543960571289, 0.007705599784851074, 0.007667712211608887, 0.007699456214904785, 0.007643136024475097, 0.007656447887420655, 0.007656447887420655, 0.007661568164825439, 0.0076605439186096195, 0.007755775928497314, 0.007671807765960693, 0.0076871681213378906, 0.007669760227203369, 0.0077619199752807615, 0.007662591934204102, 0.0076605439186096195, 0.007664639949798584, 0.007772160053253174, 0.007708672046661377, 0.007676928043365478, 0.007662591934204102, 0.00795750379562378, 0.017183744430541992, 0.007907328128814697, 0.007918591976165772, 0.00783462381362915, 0.007717887878417969, 0.0076943359375, 0.007677951812744141, 
0.007763967990875244, 0.007702527999877929, 0.007685120105743408, 0.007701504230499268, 0.007684095859527588, 0.007709695816040039, 0.007688191890716553, 0.007730175971984863, 0.007692287921905518, 0.007733248233795166, 0.007673855781555176, 0.007678976058959961, 0.007662591934204102, 0.007718912124633789, 0.007674880027770996, 0.007672832012176513, 0.007670783996582031, 0.007658495903015137, 0.007688191890716553, 0.007734272003173828, 0.007763967990875244, 0.007769087791442871, 0.007686143875122071, 0.007650303840637207, 0.007665664196014404, 0.007681024074554443, 0.007721983909606934, 0.0077281279563903805, 0.007683072090148926, 0.00769536018371582, 0.0077916159629821775, 0.007670783996582031, 0.007696383953094482, 0.0076605439186096195, 0.007673920154571533, 0.0076819839477539065, 0.007651328086853027, 0.0076943359375, 0.007655424118041992, 0.0076574721336364745, 0.007684095859527588, 0.007669760227203369, 0.007726079940795898, 0.007689216136932373, 0.007771135807037354, 0.0076912641525268555, 0.007823359966278077, 0.007716864109039307, 0.007703551769256592, 0.007678976058959961, 0.007688191890716553, 0.0077844481468200685, 0.007733248233795166, 0.007706624031066894, 0.0077199358940124516, 0.007748608112335205, 0.016525312423706053, 0.007715839862823487, 0.0077209601402282715, 0.007697408199310303, 0.007702527999877929, 0.007712800025939942, 0.007707615852355957, 0.007683072090148926, 0.007696383953094482, 0.007737343788146973, 0.0076687359809875484, 0.007705599784851074, 0.007711743831634522, 0.007686143875122071, 0.007732223987579345, 0.00773632001876831, 0.00787660789489746, 0.00830361557006836, 0.007911424160003662, 0.007717887878417969, 0.007702527999877929, 0.007723008155822754, 0.007732223987579345, 0.007715839862823487, 0.007697408199310303, 0.007795711994171142, 0.007724031925201416, 0.007716864109039307, 0.007809023857116699, 0.007868415832519531, 0.0077608962059021, 0.00773632001876831, 0.007714816093444824, 0.007705599784851074, 0.007703551769256592, 0.007706624031066894, 0.007705599784851074, 0.007702527999877929, 0.007710720062255859, 0.007686143875122071, 0.007699456214904785, 0.007853055953979492, 0.007704576015472412, 0.007655424118041992, 0.007672832012176513, 0.007702527999877929, 0.007703551769256592, 0.007702527999877929, 0.007742464065551758, 0.0076871681213378906, 0.007737343788146973, 0.007697408199310303, 0.007700479984283447, 0.007683072090148926, 0.007702527999877929, 0.007733248233795166, 0.007663616180419922, 0.007678976058959961, 0.007686143875122071, 0.00769536018371582, 0.0077209601402282715, 0.007686143875122071, 0.007703551769256592, 0.01681612777709961, 0.007713791847229004, 0.007679999828338623, 0.007707712173461914, 0.007690176010131836, 0.007659520149230957, 0.007675903797149658, 0.007814144134521485, 0.007681024074554443, 0.007721983909606934, 0.007671807765960693, 0.007718912124633789, 0.007726079940795898, 0.007714816093444824, 0.007703551769256592, 0.007715839862823487, 0.0076943359375, 0.00769536018371582, 0.007705599784851074, 0.007677951812744141, 0.007672832012176513, 0.007671807765960693, 0.007730175971984863, 0.007686143875122071, 0.007659520149230957, 0.0076912641525268555, 0.0076984319686889645, 0.007662591934204102, 0.007699456214904785, 0.0077578239440917966, 0.0076943359375, 0.0077404799461364745, 0.007823296070098877, 0.007710720062255859, 0.0076943359375, 0.007682047843933106, 0.00769536018371582, 0.007639039993286132, 0.007710720062255859, 0.007692287921905518, 0.007687200069427491, 0.007911392211914062, 0.00794316816329956, 
0.007654399871826172, 0.0076943359375, 0.007661568164825439, 0.0076902399063110355, 0.007673855781555176, 0.007707647800445557, 0.007701504230499268, 0.007683072090148926, 0.007661568164825439, 0.00773632001876831, 0.007669760227203369, 0.007759871959686279, 0.007682047843933106, 0.007688191890716553, 0.007670783996582031, 0.00773638391494751, 0.007666624069213867, 0.007685120105743408, 0.007774208068847656, 0.007774208068847656, 0.016714752197265623, 0.007661568164825439, 0.007718912124633789, 0.007678976058959961, 0.0076902399063110355, 0.0076687359809875484, 0.007655424118041992, 0.007704576015472412, 0.007724031925201416, 0.007693312168121338, 0.007682047843933106, 0.007713856220245362, 0.007695295810699463, 0.007727104187011719, 0.007708672046661377, 0.0076912641525268555, 0.0076943359375, 0.007725056171417236, 0.007659520149230957, 0.008349696159362792, 0.008087552070617676, 0.00800153636932373, 0.007982079982757568, 0.007956480026245117, 0.007929855823516846, 0.008227840423583984, 0.008046591758728027, 0.008014847755432129, 0.007885824203491211, 0.007773183822631836, 0.0076871681213378906, 0.007777279853820801, 0.007727104187011719, 0.007721983909606934, 0.007692287921905518, 0.007705599784851074, 0.007746560096740723, 0.007696383953094482, 0.007721983909606934, 0.007792640209197998, 0.007708672046661377, 0.007921664237976075, 0.007707647800445557, 0.007706624031066894, 0.007712768077850342, 0.007642111778259277, 0.007730175971984863, 0.007689216136932373, 0.007717887878417969, 0.007708672046661377, 0.007692287921905518, 0.007714816093444824, 0.007763967990875244, 0.007715839862823487, 0.007689216136932373, 0.007714816093444824, 0.007768064022064209, 0.0076912641525268555, 0.007679999828338623, 0.007677951812744141, 0.007693312168121338, 0.007681024074554443, 0.007692287921905518, 0.01683967971801758, 0.007956480026245117, 0.00795136022567749, 0.007931968212127686, 0.007883711814880371, 0.0077209601402282715, 0.007726111888885498, 0.007844831943511963, 0.007725056171417236, 0.007685120105743408, 0.007966720104217529, 0.007915520191192627, 0.007905280113220215, 0.007930880069732665, 0.0077209601402282715, 0.0077199358940124516, 0.007727104187011719, 0.007699456214904785, 0.0076912641525268555, 0.0076912641525268555, 0.007654399871826172, 0.007792640209197998, 0.0076871681213378906, 0.007699456214904785, 0.0076902399063110355, 0.007671807765960693, 0.0077209601402282715, 0.0076984319686889645, 0.007840767860412597, 0.007961599826812745, 0.007816192150115966, 0.007692287921905518, 0.007686143875122071, 0.007718912124633789, 0.007701504230499268, 0.007663616180419922, 0.007713791847229004, 0.007682047843933106, 0.007669760227203369, 0.007689216136932373, 0.007676928043365478, 0.00800153636932373, 0.007803904056549072, 0.007771135807037354, 0.0077322559356689455, 0.007685152053833008, 0.007731135845184326, 0.007712768077850342, 0.007674880027770996, 0.007709695816040039, 0.007684095859527588, 0.007681024074554443, 0.00773632001876831, 0.007786496162414551, 0.007685120105743408, 0.007683072090148926, 0.007667712211608887, 0.007677951812744141, 0.007724031925201416, 0.007700479984283447, 0.007700479984283447, 0.007817215919494629, 0.0077209601402282715, 0.01655705642700195, 0.007729152202606201, 0.007725056171417236, 0.007679999828338623, 0.00773529577255249, 0.007704576015472412, 0.007726079940795898, 0.007715839862823487, 0.007713791847229004, 0.00774454402923584, 0.007709663867950439, 0.007705599784851074, 0.007731200218200684, 0.007697408199310303, 0.007729152202606201, 
0.007713791847229004, 0.00773632001876831, 0.007699456214904785, 0.007676928043365478, 0.007709695816040039, 0.007763967990875244, 0.007693312168121338, 0.007684095859527588, 0.007672832012176513, 0.007727104187011719, 0.007699456214904785, 0.007692287921905518, 0.007696383953094482, 0.007666687965393066, 0.007671807765960693, 0.007734272003173828, 0.007797760009765625, 0.007671807765960693, 0.007671807765960693, 0.007674880027770996, 0.007686143875122071, 0.007685120105743408, 0.007680031776428222, 0.007708640098571778, 0.007688191890716553, 0.007741439819335938, 0.007726079940795898, 0.007679999828338623, 0.007747583866119385, 0.007707647800445557, 0.007661568164825439, 0.007709695816040039, 0.007714816093444824, 0.007689216136932373, 0.007710720062255859, 0.007702527999877929, 0.00769536018371582, 0.007712768077850342, 0.007729152202606201, 0.007714816093444824, 0.0076984319686889645, 0.00769536018371582, 0.007725056171417236, 0.007663616180419922, 0.007682112216949463, 0.007765952110290527, 0.007704576015472412, 0.007675903797149658, 0.016482303619384766, 0.0077209601402282715, 0.007693312168121338, 0.007663616180419922, 0.007686143875122071, 0.007688191890716553, 0.007673855781555176, 0.007783423900604248, 0.007667712211608887, 0.007689216136932373, 0.007803904056549072, 0.00769536018371582, 0.007676928043365478, 0.007678976058959961, 0.007724031925201416, 0.007696383953094482, 0.007805952072143555, 0.007718912124633789, 0.007708672046661377, 0.0076912641525268555, 0.007655424118041992, 0.0076902399063110355, 0.007697408199310303, 0.007712768077850342, 0.007770112037658691, 0.007656447887420655, 0.00785920000076294, 0.007704576015472412, 0.007676928043365478, 0.007677951812744141, 0.007712768077850342, 0.0076943359375, 0.007666687965393066, 0.007725056171417236, 0.007830527782440186, 0.007674880027770996, 0.00769536018371582, 0.007692287921905518, 0.007716864109039307, 0.0076687359809875484, 0.007670783996582031, 0.007796735763549805, 0.007696383953094482, 0.0076943359375, 0.007697408199310303, 0.007675903797149658, 0.007701504230499268, 0.007655424118041992, 0.007669760227203369, 0.007663616180419922, 0.007686143875122071, 0.007696383953094482, 0.00764518404006958, 0.00769536018371582, 0.007786496162414551, 0.007718912124633789, 0.007711743831634522, 0.0077209601402282715, 0.007987199783325195, 0.0076871681213378906, 0.007702527999877929, 0.007797760009765625, 0.007693312168121338, 0.016484352111816408, 0.007674880027770996, 0.007697408199310303, 0.007717887878417969, 0.007707647800445557, 0.007704576015472412, 0.007643136024475097, 0.007710720062255859, 0.0077199358940124516, 0.0077199358940124516, 0.007701504230499268, 0.007710720062255859, 0.0076984319686889645, 0.0077619199752807615, 0.007701504230499268, 0.007733248233795166, 0.007715871810913086, 0.007692255973815918, 0.007709695816040039, 0.0076943359375, 0.007684095859527588, 0.0076912641525268555, 0.007692287921905518, 0.007710720062255859, 0.007682047843933106, 0.007670783996582031, 0.007689216136932373, 0.007673855781555176, 0.007685120105743408, 0.007702527999877929, 0.007711743831634522, 0.007717887878417969, 0.007881728172302246, 0.007721983909606934, 0.007707647800445557, 0.007847936153411865, 0.007799808025360107, 0.0077281279563903805, 0.007715839862823487, 0.0076902399063110355, 0.007700479984283447, 0.007724031925201416, 0.0076943359375, 0.007865344047546387, 0.0076871681213378906, 0.007723008155822754, 0.007727104187011719, 0.0077199358940124516, 0.007688191890716553, 0.007844863891601562, 0.007663616180419922, 
0.007730175971984863, 0.007700479984283447, 0.007699456214904785, 0.00773529577255249, 0.007684095859527588, 0.007678976058959961, 0.007696383953094482, 0.007714816093444824, 0.00782643222808838, 0.007778304100036621, 0.007774208068847656, 0.007678976058959961, 0.016561151504516602, 0.007704576015472412, 0.007812096118927002, 0.007910399913787843, 0.007922688007354736, 0.008080384254455567, 0.007913472175598145, 0.007903232097625732, 0.007676928043365478, 0.007712831974029541, 0.007683008193969726, 0.007683072090148926, 0.007721983909606934, 0.007693312168121338, 0.007700479984283447, 0.0077199358940124516, 0.007685120105743408, 0.007682047843933106, 0.007686143875122071, 0.007696383953094482, 0.007689216136932373, 0.007641088008880615, 0.007703551769256592, 0.0076912641525268555, 0.007705599784851074, 0.0077281599044799806, 0.007726047992706299, 0.007732223987579345, 0.007673855781555176, 0.0076687359809875484, 0.00778547191619873, 0.007723008155822754, 0.007731200218200684, 0.007683072090148926, 0.007699456214904785, 0.007643136024475097, 0.007693312168121338, 0.007725088119506836, 0.007687136173248291, 0.007711743831634522, 0.007689216136932373, 0.007809023857116699, 0.007697408199310303, 0.007674911975860596, 0.0076861119270324706, 0.007665664196014404, 0.007709695816040039, 0.007683072090148926, 0.007771135807037354, 0.007688191890716553, 0.007716864109039307, 0.007693312168121338, 0.007705599784851074, 0.007773183822631836, 0.007767039775848389, 0.0077578239440917966, 0.0077209601402282715, 0.007709695816040039, 0.007776256084442139, 0.007709695816040039, 0.007684095859527588, 0.007709695816040039, 0.00769536018371582, 0.016553983688354493, 0.007738368034362793, 0.007707647800445557, 0.0077199358940124516, 0.007712768077850342, 0.007697408199310303, 0.007688191890716553, 0.007713791847229004, 0.007730175971984863, 0.007711743831634522, 0.007738368034362793, 0.007681024074554443, 0.007712768077850342, 0.007685120105743408, 0.007709695816040039, 0.007734272003173828, 0.007716864109039307, 0.007678976058959961, 0.0076912641525268555, 0.007699456214904785, 0.0077209601402282715, 0.007701504230499268, 0.007718912124633789, 0.007665664196014404, 0.0076912641525268555, 0.007706624031066894, 0.007685120105743408, 0.007721983909606934, 0.007716864109039307, 0.007688191890716553, 0.007689216136932373, 0.007676928043365478, 0.007684095859527588, 0.007715839862823487, 0.007701504230499268, 0.007725056171417236, 0.0076912641525268555, 0.007726079940795898, 0.007692287921905518, 0.007766016006469726, 0.00800051212310791, 0.00773529577255249, 0.007711743831634522, 0.007738368034362793, 0.0076912641525268555, 0.0076574721336364745, 0.0077066879272460935, 0.007749567985534668, 0.007740416049957275, 0.007701504230499268, 0.007696383953094482, 0.007714816093444824, 0.0076871681213378906, 0.007677951812744141, 0.0076902399063110355, 0.007684095859527588, 0.007696415901184082, 0.007714784145355225, 0.007678976058959961, 0.007717919826507568, 0.0077270717620849605, 0.007686143875122071, 0.007689216136932373]",tokens/s,126.69384730146851,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,togethercomputer/RedPajama-INCITE-Base-7B-v0.1,togethercomputer/RedPajama-INCITE-Base-7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-30b,huggyllama/llama-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,togethercomputer/RedPajama-INCITE-Base-3B-v1,togethercomputer/RedPajama-INCITE-Base-3B-v1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,mistralai/Mixtral-8x7B-v0.1,mistralai/Mixtral-8x7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,r,r,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/r/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a3027-41fe298d0fb317040e7d28cb;78701771-d77b-4920-ad67-ad6aa418a428) Repository Not Found for url: https://huggingface.co/r/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: r is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-12b,EleutherAI/pythia-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-30b,facebook/opt-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", 
line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-564M,facebook/xglm-564M,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File 
""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,TencentARC/Mistral_Pro_8B_v0.1,TencentARC/Mistral_Pro_8B_v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File 
""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-base-alpha-7b,stabilityai/stablelm-base-alpha-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = 
exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-6B,01-ai/Yi-6B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 564, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3907, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 462, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-14B,Qwen/Qwen1.5-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm2-20b,internlm/internlm2-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,.,.,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD 
EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 370, in cached_file raise EnvironmentError( OSError: . does not appear to have a file named config.json. Checkout 'https://huggingface.co/./tree/None' for available files. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-j-6b,EleutherAI/gpt-j-6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTJForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-7.5B,facebook/xglm-7.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: XGLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-7b,tiiuae/falcon-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: FalconForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/polyglot-ko-12.8b,EleutherAI/polyglot-ko-12.8b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciCoder-1b,Deci/DeciCoder-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1758, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2397, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) TypeError: DeciCoderAttention.forward() got an unexpected keyword argument 'cache_position' ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,m,m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/m/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = 
_request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2b88-6a4a76de2dccf9bc713d96a8;f6ab006f-d27f-4a7c-8c61-ca49fb9fe3f1) Repository Not Found for url: https://huggingface.co/m/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: m is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-1.8B,Qwen/Qwen1.5-1.8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA 
A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1869.656064,3326.60736,0.0,2682.257408,2578.238464,s,10,1.3870304412841796,0.13870304412841797,0.00244373759901306,0.13811017608642578,0.14131588592529296,0.14323313064575197,0.14476692642211916,"[0.14515037536621095, 0.13752809143066405, 0.1378865966796875, 0.13688485717773438, 0.13833375549316407, 0.13642874145507813, 0.13711347961425782, 0.13835682678222655, 0.1384578857421875, 0.14088983154296875]",tokens/s,1845.6696578554026,kWh,1.6179650182774805e-06,8.864855210497944e-07,6.8487079142743225e-06,9.353158453601597e-06,tokens/kWh,27370433.342912387,MB,1869.656064,3328.704512,0.0,2682.257408,2667.0976,s,10,82.21512695312498,8.2215126953125,0.02726168309176718,8.22408544921875,8.254994140625,8.26155078125,8.26679609375,"[8.218158203125, 8.2300126953125, 8.231037109375, 8.253537109375, 8.2426494140625, 8.18561865234375, 8.1915498046875, 8.18607568359375, 8.268107421875, 8.208380859375]",tokens/s,7.662823416415751,kWh,9.660354430590225e-05,5.294595236465346e-05,0.0003949548913061273,0.000544504387976683,tokens/kWh,115701.54693169857,,s,629,83.32182740783702,0.1324671341936994,0.01648122008836376,0.13012069702148438,0.13181768188476561,0.13225840454101562,0.2677710559082031,"[0.1353492431640625, 0.13449932861328126, 0.13354396057128906, 0.13218812561035156, 0.1321062469482422, 0.132279296875, 0.1320765380859375, 0.1297838134765625, 0.12942950439453124, 0.13008486938476563, 0.1297357482910156, 0.12950111389160157, 0.12954214477539064, 0.12996812438964844, 0.12972236633300782, 0.129586181640625, 0.12967730712890624, 0.12944589233398437, 0.12974386596679688, 0.12966400146484375, 0.12964044189453125, 0.12967730712890624, 0.13006745910644532, 0.13016166687011718, 0.12972647094726564, 0.13020672607421874, 0.1301012420654297, 0.13006541442871095, 0.1300264892578125, 0.13026918029785156, 0.1298155517578125, 0.12965580749511718, 0.12980120849609375, 0.13100137329101563, 0.12973667907714845, 0.12997222900390626, 0.12998655700683595, 0.13009100341796875, 0.13086618041992187, 0.13041253662109376, 0.13003570556640626, 0.13003366088867188, 0.12998655700683595, 0.13012069702148438, 0.12974899291992187, 0.1296711730957031, 0.13009510803222657, 0.13012582397460937, 0.13009408569335937, 0.129723388671875, 0.12967532348632813, 0.13028140258789062, 0.1303726043701172, 0.13008793640136718, 0.13204173278808592, 0.13000090026855468, 0.1302650909423828, 0.12970188903808594, 0.13084979248046874, 0.12998963928222657, 0.12973773193359375, 0.13070130920410156, 0.26767672729492187, 0.13097874450683594, 0.12999679565429687, 0.1298524169921875, 0.12990463256835938, 0.12967730712890624, 0.130735107421875, 0.12993740844726562, 0.12993843078613282, 0.12966195678710937, 0.13075456237792968, 0.1301739501953125, 0.1298462677001953, 0.12979507446289062, 0.12965171813964843, 0.12960255432128906, 0.12983296203613282, 0.1297029113769531, 0.12987289428710938, 0.13024870300292968, 0.12961074829101563, 0.12961587524414062, 0.1304627227783203, 0.13114883422851562, 0.13138124084472655, 0.13123989868164063, 0.1312419891357422, 0.1315799102783203, 0.13145703125, 0.1315246124267578, 0.13152359008789063, 0.13154917907714844, 0.13147648620605468, 0.13150822448730468, 0.1314273223876953, 0.1317058563232422, 0.1314283447265625, 0.13160345458984374, 0.13148159790039063, 0.13152767944335939, 0.13164851379394532, 0.13085594177246093, 0.13025791931152345, 0.12963226318359375, 0.13061222839355469, 0.12971110534667968, 0.1318850860595703, 0.13038383483886717, 0.12956982421875, 
0.13069512939453126, 0.12964761352539061, 0.1297664337158203, 0.1301851806640625, 0.13052313232421875, 0.1300756530761719, 0.131198974609375, 0.1302650909423828, 0.13156045532226562, 0.13132595825195312, 0.13137612915039062, 0.13114163208007812, 0.13150822448730468, 0.13148774719238282, 0.26988442993164063, 0.1297592315673828, 0.1303961639404297, 0.13102079772949218, 0.13180621337890625, 0.1311805419921875, 0.131198974609375, 0.1320120391845703, 0.13153382873535155, 0.13055078125, 0.13116621398925782, 0.13224858093261718, 0.1315379180908203, 0.13149183654785157, 0.13085594177246093, 0.1307740173339844, 0.13012069702148438, 0.13034701538085938, 0.13029273986816406, 0.12967219543457031, 0.12965785217285156, 0.13052621459960936, 0.1309951934814453, 0.13132908630371093, 0.12980528259277344, 0.12976431274414063, 0.130735107421875, 0.1307904052734375, 0.13091941833496093, 0.13070028686523438, 0.13097164916992188, 0.13049139404296875, 0.13055282592773437, 0.13094093322753905, 0.1304883270263672, 0.13196185302734376, 0.13015142822265624, 0.13110272216796875, 0.1304780731201172, 0.13022618103027345, 0.13066342163085937, 0.1300070343017578, 0.1298851776123047, 0.12999679565429687, 0.13136691284179688, 0.13021286010742186, 0.1300305938720703, 0.13127577209472657, 0.12978688049316406, 0.12988313293457032, 0.12990975952148437, 0.13098086547851562, 0.12965785217285156, 0.13045555114746094, 0.1315246124267578, 0.1313638458251953, 0.12958309936523438, 0.12952677917480468, 0.12959642028808593, 0.13055795288085936, 0.13012889099121094, 0.13161677551269532, 0.13049856567382812, 0.2679295959472656, 0.13007772827148437, 0.1300623016357422, 0.1296732177734375, 0.1302405090332031, 0.13101568603515626, 0.13053543090820313, 0.12958309936523438, 0.12989543151855468, 0.13040640258789063, 0.12972032165527345, 0.12959744262695314, 0.12965682983398438, 0.12967628479003906, 0.1296046142578125, 0.12968960571289062, 0.1309102020263672, 0.13147955322265625, 0.13136282348632813, 0.13167820739746094, 0.1325506591796875, 0.1314693145751953, 0.1313454132080078, 0.1313280029296875, 0.13255577087402343, 0.1314273223876953, 0.13156965637207033, 0.13132698059082032, 0.1305374755859375, 0.1306746826171875, 0.1309071350097656, 0.13065113830566405, 0.13108326721191407, 0.1307310333251953, 0.13129315185546875, 0.13122866821289061, 0.13088768005371093, 0.13070541381835937, 0.1310146484375, 0.13133619689941406, 0.1298104248046875, 0.13102490234375, 0.13102694702148437, 0.13097779846191407, 0.13345689392089843, 0.13114060974121095, 0.13085081481933594, 0.13208985900878906, 0.13153689575195313, 0.13162701416015626, 0.13186355590820312, 0.13157069396972657, 0.13132492065429688, 0.13146829223632814, 0.13144781494140625, 0.13143244934082032, 0.13133209228515624, 0.13199154663085938, 0.13162086486816407, 0.13136589050292968, 0.13134745788574217, 0.13149900817871094, 0.13125018310546874, 0.27049984741210936, 0.13173965454101563, 0.13157376098632811, 0.13129933166503907, 0.13149183654785157, 0.13152255249023437, 0.13144268798828124, 0.13151744079589844, 0.13144781494140625, 0.1312665557861328, 0.1313638458251953, 0.1312542724609375, 0.13116621398925782, 0.1316812744140625, 0.1304279022216797, 0.13107609558105468, 0.13173452758789062, 0.13157273864746094, 0.13156761169433595, 0.131114013671875, 0.13155325317382813, 0.13144883728027343, 0.13135667419433594, 0.1312972869873047, 0.13135154724121093, 0.13139456176757813, 0.13143653869628907, 0.13130137634277345, 0.13196800231933595, 0.1318707275390625, 0.131631103515625, 0.1325455322265625, 
0.13174578857421876, 0.13033779907226561, 0.12979814147949217, 0.129902587890625, 0.13098188781738282, 0.13004287719726562, 0.13019442749023438, 0.12989439392089844, 0.13051187133789063, 0.12981350708007813, 0.12995890808105467, 0.13021900939941405, 0.12963839721679687, 0.12967628479003906, 0.13002546691894531, 0.13009510803222657, 0.1296711730957031, 0.13236019897460938, 0.12973362731933594, 0.13032858276367187, 0.13070643615722657, 0.1302425537109375, 0.1299220428466797, 0.12974490356445312, 0.12966812133789063, 0.12980630493164064, 0.129623046875, 0.13014938354492187, 0.13049958801269532, 0.12993023681640625, 0.1298841552734375, 0.26678680419921874, 0.1296680908203125, 0.12964556884765624, 0.12968447875976563, 0.12973260498046876, 0.129765380859375, 0.1300142059326172, 0.1296855010986328, 0.12954725646972656, 0.1295247344970703, 0.12960870361328125, 0.1295667266845703, 0.12963941955566408, 0.12973158264160156, 0.13008895874023438, 0.13011558532714843, 0.12971827697753907, 0.12998963928222657, 0.12966706848144532, 0.12973568725585938, 0.12970803833007813, 0.1298350067138672, 0.1297090606689453, 0.12965274047851563, 0.12967730712890624, 0.13019648742675782, 0.12975820922851564, 0.12972854614257812, 0.12970799255371093, 0.1302794189453125, 0.12972647094726564, 0.12966400146484375, 0.12970086669921874, 0.12959333801269532, 0.1299220428466797, 0.1300633544921875, 0.1296537628173828, 0.12968038940429688, 0.1319403839111328, 0.12973974609375, 0.13016064453125, 0.13014527893066405, 0.1306234893798828, 0.1300377655029297, 0.13088461303710938, 0.13168946838378906, 0.1299251251220703, 0.12965580749511718, 0.12978688049316406, 0.13135565185546874, 0.13036749267578124, 0.12981248474121093, 0.1297510986328125, 0.1297816925048828, 0.12961383056640624, 0.1298667449951172, 0.12978175354003907, 0.12962611389160156, 0.13000294494628906, 0.12986572265625, 0.1298462677001953, 0.12970803833007813, 0.12991897583007814, 0.2678077392578125, 0.13065216064453125, 0.12982272338867187, 0.13018418884277344, 0.13069625854492187, 0.12963424682617186, 0.12971827697753907, 0.12956063842773438, 0.13137094116210937, 0.13036647033691406, 0.12986265563964844, 0.12970803833007813, 0.1298462677001953, 0.12968447875976563, 0.1297029113769531, 0.12973670959472655, 0.12944793701171875, 0.12968345642089843, 0.1307125701904297, 0.12972134399414062, 0.12976742553710938, 0.12971929931640624, 0.12958309936523438, 0.13087026977539062, 0.12964659118652344, 0.1298032684326172, 0.12998757934570312, 0.13012275695800782, 0.12993536376953124, 0.1304698944091797, 0.1299988555908203, 0.12983807373046874, 0.13000192260742188, 0.13018623352050782, 0.1299199981689453, 0.13019648742675782, 0.13000090026855468, 0.13050265502929687, 0.12989645385742188, 0.12952677917480468, 0.12986778259277343, 0.13002546691894531, 0.13098597717285157, 0.129828857421875, 0.12967730712890624, 0.13003981018066407, 0.1295636444091797, 0.12984115600585938, 0.12994764709472656, 0.13031321716308594, 0.12971315002441405, 0.12961485290527344, 0.12971417236328125, 0.12996202087402345, 0.13037257385253906, 0.12986061096191406, 0.12995584106445313, 0.13010226440429687, 0.1315707550048828, 0.1297571258544922, 0.12957183837890626, 0.12970188903808594, 0.13077810668945314, 0.2692198486328125, 0.13007154846191407, 0.12988313293457032, 0.12971212768554688, 0.12965481567382814, 0.12962608337402343, 0.12967219543457031, 0.12959231567382812, 0.12979200744628908, 0.1296680908203125, 0.13002957153320313, 0.12997938537597656, 0.12960153198242189, 0.13012786865234374, 
0.13016986083984375, 0.12967219543457031, 0.1310576629638672, 0.12998348999023437, 0.12976332092285156, 0.1300869140625, 0.1296824951171875, 0.13126751708984374, 0.12993536376953124, 0.13007359313964845, 0.12969984436035156, 0.12999679565429687, 0.1298698272705078, 0.1300858917236328, 0.1298913269042969, 0.1299404754638672, 0.13012275695800782, 0.13052313232421875, 0.1302425537109375, 0.13017805480957031, 0.12996812438964844, 0.12995071411132814, 0.13004083251953125, 0.130302978515625, 0.1300623321533203, 0.13056716918945313, 0.1300695037841797, 0.1299261474609375, 0.12973260498046876, 0.13010330200195314, 0.1296855010986328, 0.13009613037109374, 0.12976742553710938, 0.12981350708007813, 0.12980429077148437, 0.12988723754882814, 0.1295564727783203, 0.12995277404785155, 0.12949913024902343, 0.12956877136230469, 0.129870849609375, 0.13016371154785156, 0.12968754577636718, 0.1295667266845703, 0.12944383239746093, 0.1296282196044922, 0.1295543670654297, 0.12969778442382812, 0.12969062805175782, 0.26870681762695314, 0.1297827911376953, 0.1297592315673828, 0.131557373046875, 0.12989439392089844, 0.13009408569335937, 0.1305999298095703, 0.1311446990966797, 0.13035110473632813, 0.13046885681152343, 0.1303521270751953, 0.13139045715332032, 0.1301749725341797, 0.1304627227783203, 0.13010841369628906, 0.1311856689453125, 0.1305753936767578, 0.1308395233154297, 0.12992515563964843, 0.13041970825195312, 0.13009100341796875, 0.1309265594482422, 0.130155517578125, 0.13011354064941405, 0.12959437561035156, 0.1299148864746094, 0.13058047485351562, 0.13014527893066405, 0.13060198974609374, 0.12966400146484375, 0.12963533020019533, 0.1295380554199219, 0.12953395080566407, 0.1297786865234375, 0.13530213928222656, 0.13242477416992188, 0.13271443176269532, 0.1324779510498047, 0.13233970642089843, 0.13220352172851563, 0.1323294677734375, 0.13220556640625, 0.1319833526611328, 0.1321994171142578, 0.13245542907714844, 0.132347900390625, 0.13226495361328125, 0.13233561706542968, 0.13197415161132814, 0.13208883666992188, 0.13216461181640626, 0.1321881561279297, 0.13220249938964843, 0.13227622985839843, 0.1321246795654297, 0.13223423767089842, 0.13205606079101562, 0.13206629943847656, 0.13218611145019532, 0.13227622985839843, 0.13237759399414062, 0.13250457763671875, 0.13218917846679687, 0.27291647338867187, 0.13057228088378905, 0.12985139465332032, 0.12977766418457032, 0.12981146240234376, 0.1296363525390625, 0.12980120849609375, 0.12959642028808593, 0.12995993041992188, 0.129691650390625, 0.12974592590332032, 0.12970086669921874, 0.12972647094726564, 0.1295984649658203, 0.12972032165527345, 0.13100953674316407, 0.13002957153320313, 0.13050367736816407, 0.13074021911621095, 0.13157478332519532, 0.12986880493164063, 0.12991693115234376, 0.12960050964355468, 0.12965682983398438, 0.13195468139648436, 0.13054464721679687, 0.1317058563232422, 0.13119078063964842, 0.13114982604980469, 0.1297049560546875, 0.12969573974609375, 0.12971315002441405, 0.12958412170410155, 0.1308958740234375, 0.1309696044921875, 0.12978994750976564, 0.12954112243652344, 0.12962713623046876, 0.12967832946777344, 0.12949913024902343, 0.12971827697753907, 0.12982989501953124, 0.1297622985839844, 0.12973464965820314, 0.12974490356445312, 0.1297244110107422, 0.13159628295898437, 0.13082418823242187, 0.1297407989501953, 0.12985548400878907, 0.1299814453125, 0.13154815673828124, 0.13089791870117187, 0.12983602905273436, 0.1315010528564453, 0.13150003051757814, 0.132068359375, 0.13167718505859374, 0.13150309753417969, 0.13090509033203124, 
0.12976332092285156, 0.1298053741455078, 0.1295963592529297]",tokens/s,7.549042304619914,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-MoE-A2.7B,Qwen/Qwen1.5-MoE-A2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3640, in from_pretrained hf_quantizer.preprocess_model( File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 182, in preprocess_model return self._process_model_before_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 85, in _process_model_before_weight_loading model, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( [Previous line repeated 1 more time] File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 165, in replace_with_awq_linear model._modules[name] = target_cls( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemm.py"", line 102, in __init__ assert out_features % (32 // self.w_bit) == 0 AssertionError ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,v,v,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/v/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a33a9-2df2dc967a4982e70518bca6;7165ad84-32ab-4b71-9272-a799d644c7e4) Repository Not Found for url: https://huggingface.co/v/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: v is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-110B,Qwen/Qwen1.5-110B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in 
run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-2-12b,stabilityai/stablelm-2-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.1,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2286.108672,9584.508928,0.0,8938.061824,8628.937728,s,10,10.131530883789061,1.0131530883789062,0.001088448228780468,1.0127930908203124,1.0144804138183594,1.0150706817626953,1.015542896118164,"[1.0156609497070312, 1.0116715698242187, 1.0127140502929688, 1.012624267578125, 1.0126713256835937, 1.0143492431640626, 1.0128721313476563, 1.0132860717773438, 1.013524658203125, 1.0121566162109374]",tokens/s,252.6765233570105,kWh,1.1963782442940607e-05,6.5549710930463335e-06,5.5387488754404804e-05,7.390624229039174e-05,tokens/kWh,3463848.1414618148,MB,2286.108672,9584.508928,0.0,8938.061824,8715.664896,s,10,592.3277656249999,59.232776562499986,0.006289011500180114,59.23276171875,59.24203125,59.24248828125,59.24285390625,"[59.23234765625, 59.22259375, 59.2419296875, 59.22881640625, 59.23756640625, 59.22535546875, 59.22958984375, 59.23317578125, 59.2429453125, 59.2334453125]",tokens/s,1.0636003182043137,kWh,0.0006992366234295897,0.00038324170626838165,0.0032439279284735946,0.004326406258171565,tokens/kWh,14561.739291359381,,s,629,600.5023488159183,0.9546937183082955,0.12033659124895162,0.9401558837890625,0.9410275512695312,0.9412470825195313,1.95283546875,"[0.94150146484375, 0.940000244140625, 0.9400443115234375, 0.9407887573242187, 0.9415792846679687, 0.9401016235351562, 0.9407989501953125, 0.939236328125, 0.939652099609375, 0.9404119262695313, 0.9412341918945313, 0.9400452880859375, 0.940031982421875, 0.9407703247070313, 0.9399039916992188, 0.9399715576171875, 0.9395128173828124, 0.9409924926757812, 0.9403135986328125, 0.939736083984375, 0.9405081787109375, 0.940558349609375, 0.9399757080078125, 0.9390223388671874, 0.9397933959960938, 0.9407989501953125, 0.9399931030273437, 0.9401292724609375, 0.9413304443359375, 0.9404334106445312, 0.9403258666992188, 0.9405870361328125, 0.9406074829101563, 0.9403986206054687, 0.9400125732421875, 0.941048828125, 0.9410816040039063, 0.9399193725585937, 0.94055322265625, 0.9405183715820312, 0.9404334106445312, 0.9397545166015625, 0.940400634765625, 0.9393602294921874, 0.94025830078125, 0.94036376953125, 0.9395015869140625, 0.9403494262695312, 0.9393715209960938, 0.9401753540039063, 0.939345947265625, 0.9394923706054688, 0.9394974975585938, 0.940368896484375, 0.9394483032226563, 0.939377685546875, 0.9410477905273438, 0.9396592407226563, 0.9396121826171875, 0.9394268188476562, 0.940126220703125, 0.939109375, 1.9525693359375, 0.9394636840820313, 0.9393213500976563, 0.9392977905273437, 0.9397432250976563, 0.940037109375, 0.9395179443359375, 0.9403463745117188, 0.9393008422851562, 0.93964697265625, 0.9392588500976562, 0.9400934448242187, 0.9392957153320313, 0.9392506713867188, 0.9399244995117187, 0.9402685546875, 0.9394237670898438, 0.9390971069335937, 0.940157958984375, 0.9403064575195312, 0.9402173461914063, 0.939841552734375, 
0.9407047729492187, 0.939747314453125, 0.9393909912109375, 0.9412290649414062, 0.940248046875, 0.9398323364257812, 0.9403709716796875, 0.9403074340820312, 0.9406505126953125, 0.940516357421875, 0.9410795288085938, 0.9408665771484375, 0.94008935546875, 0.940015625, 0.940010498046875, 0.9404405517578125, 0.94000537109375, 0.9402777709960938, 0.9398855590820312, 0.9399234619140625, 0.9397626953125, 0.9402910766601562, 0.9401549072265625, 0.940037109375, 0.9408952026367188, 0.9404805297851563, 0.9399664916992188, 0.9401907348632812, 0.94055322265625, 0.9397831420898437, 0.93945751953125, 0.9397196655273438, 0.9400934448242187, 0.9397329711914062, 0.9398640747070313, 0.94013232421875, 0.9406156616210938, 0.9400360717773437, 0.9403074340820312, 0.9402255249023438, 0.9400115356445312, 1.95331689453125, 0.9401005859375, 0.93952099609375, 0.939863037109375, 0.9400064086914063, 0.9403811645507812, 0.9397329711914062, 0.9406719970703125, 0.94033203125, 0.9403596801757812, 0.9412177734375, 0.9406023559570312, 0.9398558959960938, 0.94081640625, 0.9405286254882812, 0.9401968383789062, 0.9398538208007813, 0.9395271606445312, 0.940263427734375, 0.9397442626953125, 0.9402941284179688, 0.941149169921875, 0.9406617431640625, 0.9412741088867187, 0.940669921875, 0.9410816040039063, 0.9408839721679687, 0.939631591796875, 0.9415341796875, 0.9398968505859375, 0.9399746704101563, 0.940179443359375, 0.9405819091796875, 0.9400064086914063, 0.9398374633789063, 0.9410344848632812, 0.9401630859375, 0.9402439575195313, 0.9399183349609375, 0.9410057983398438, 0.9402869873046875, 0.9402777709960938, 0.9403402099609375, 0.941686767578125, 0.9396182861328125, 0.9397001953125, 0.9402654418945312, 0.9400330200195313, 0.940537841796875, 0.9406842651367188, 0.9406279907226562, 0.9398609619140625, 0.9404866333007813, 0.9411369018554687, 0.9399429321289062, 0.939873291015625, 0.9398394775390625, 0.9408798828125, 0.9406709594726562, 0.94021630859375, 0.9408256225585937, 0.940242919921875, 0.9397821655273437, 1.95401318359375, 0.9401282348632812, 0.9397453002929688, 0.9401876220703125, 0.9404436645507812, 0.9402992553710937, 0.9395773315429687, 0.9403228149414062, 0.939809814453125, 0.939509765625, 0.9397462768554687, 0.9422510375976563, 0.9400678100585937, 0.9396019287109375, 0.9398917236328125, 0.9403330688476562, 0.9396807861328125, 0.9395588989257813, 0.9399459838867188, 0.9394462890625, 0.939831298828125, 0.9402726440429687, 0.9402921142578125, 0.9396920166015625, 0.939767822265625, 0.9400340576171875, 0.9404702758789063, 0.9402992553710937, 0.9400657958984375, 0.93994189453125, 0.9394892578125, 0.9398200073242188, 0.9408123168945313, 0.9402921142578125, 0.9403504638671875, 0.940864501953125, 0.9403279418945313, 0.9399326782226562, 0.940169189453125, 0.9409740600585937, 0.9400985717773438, 0.9403341064453125, 0.9408727416992188, 0.9405614013671875, 0.940031005859375, 0.9394452514648437, 0.9399480590820313, 0.9405870361328125, 0.9398425903320312, 0.9403135986328125, 0.9397453002929688, 0.939378662109375, 0.9397616577148438, 0.9410897827148438, 0.9408286743164063, 0.9397012329101563, 0.9395425415039063, 0.9402388305664062, 0.9399132080078125, 0.94078564453125, 0.9406279907226562, 0.9397565307617187, 0.9397637329101562, 1.9536650390625, 0.9407723388671875, 0.939578369140625, 0.9401507568359375, 0.940353515625, 0.9400514526367187, 0.9401712646484375, 0.9398651123046875, 0.9396582641601563, 0.9398251342773437, 0.9404794921875, 0.9411246337890625, 0.9405941772460937, 0.940052490234375, 0.9404395751953125, 0.941676513671875, 
0.9395343627929688, 0.9395845336914063, 0.939747314453125, 0.9404139404296875, 0.9399725952148438, 0.9405081787109375, 0.940210205078125, 0.9395486450195313, 0.9399818115234375, 0.94059521484375, 0.9396868896484375, 0.9403258666992188, 0.9402019653320313, 0.93954150390625, 0.9396776733398438, 0.9398087768554687, 0.9410938720703125, 0.9401641235351562, 0.939953125, 0.9407017211914063, 0.940769287109375, 0.9399070434570312, 0.9406832885742188, 0.9408511962890626, 0.93992041015625, 0.93991015625, 0.9406443481445312, 0.9401129150390625, 0.9394769897460937, 0.9398220825195313, 0.940062744140625, 0.9406146850585938, 0.9397760009765626, 0.94078466796875, 0.9406842651367188, 0.9410416870117188, 0.9400278930664062, 0.9406760864257813, 0.9400023193359375, 0.9400360717773437, 0.9401005859375, 0.9414287109375, 0.9405501708984375, 0.940611572265625, 0.9412608032226563, 0.9403648071289062, 0.94048974609375, 1.9550003662109374, 0.9402306518554687, 0.9403934936523437, 0.9403607177734375, 0.9412402954101563, 0.9396428833007813, 0.9399511108398437, 0.939725830078125, 0.939884521484375, 0.9398927612304687, 0.940263427734375, 0.9413529663085938, 0.940885986328125, 0.9410529174804687, 0.9415485229492188, 0.9412464599609375, 0.940295166015625, 0.939989013671875, 0.9397749633789062, 0.9401620483398437, 0.9402521362304688, 0.940422119140625, 0.9397033081054688, 0.9398681640625, 0.94054296875, 0.9411123046875, 0.9401774291992188, 0.9401016235351562, 0.9397852172851563, 0.9397985229492187, 0.9396900024414062, 0.939810791015625, 0.93952001953125, 0.9393899536132813, 0.9396654052734374, 0.941065185546875, 0.9407026977539062, 0.9396029663085937, 0.9407150268554687, 0.9401466674804687, 0.9398435668945313, 0.9395322875976563, 0.9403463745117188, 0.9394933471679687, 0.9393858642578125, 0.939447265625, 0.9401026611328125, 0.9395661010742188, 0.939377685546875, 0.9397606201171875, 0.939526123046875, 0.9393858642578125, 0.9394186401367187, 0.9398968505859375, 0.9392496337890625, 0.9398272094726563, 0.9395732421875, 0.9405542602539062, 0.93991015625, 0.940548095703125, 0.9398589477539062, 0.9403648071289062, 0.939809814453125, 1.95293896484375, 0.93994189453125, 0.9393387451171875, 0.9408471069335937, 0.9404620971679688, 0.9404794921875, 0.9408706665039063, 0.9407283325195313, 0.9402695922851563, 0.940379150390625, 0.9420646362304688, 0.9404036865234375, 0.9405603637695312, 0.9400238037109375, 0.94124853515625, 0.9406914672851563, 0.9400872802734375, 0.9396541137695312, 0.9395056762695313, 0.9401641235351562, 0.94051123046875, 0.9398302612304688, 0.9397872924804688, 0.939341796875, 0.9398619995117188, 0.9404784545898438, 0.9394800415039063, 0.9401866455078125, 0.93954150390625, 0.9394053344726563, 0.940663818359375, 0.941427734375, 0.9398405151367187, 0.9396613159179688, 0.939568115234375, 0.94137548828125, 0.9404170532226562, 0.93991015625, 0.9405634765625, 0.9408040771484375, 0.9399254760742187, 0.9397933959960938, 0.9401558837890625, 0.9402664794921874, 0.9399244995117187, 0.9395875854492187, 0.9407119140625, 0.9401763916015625, 0.9404497680664062, 0.9396480712890625, 0.939274169921875, 0.9396900024414062, 0.9406156616210938, 0.9394974975585938, 0.9394503784179687, 0.9392230224609375, 0.939916259765625, 0.9409976196289063, 0.939694091796875, 0.9403258666992188, 0.9398446044921875, 0.9401170043945313, 0.9398978271484375, 1.9538472900390624, 0.9399326782226562, 0.9398046875, 0.9415096435546875, 0.9401118774414062, 0.9396039428710937, 0.9404282836914063, 0.9398599853515625, 0.9395968017578125, 0.9396162719726563, 
0.9403279418945313, 0.9400186767578125, 0.9399378051757813, 0.939483154296875, 0.9413427124023438, 0.939953125, 0.9398405151367187, 0.9391646728515625, 0.9401661376953125, 0.9404569702148438, 0.9409935302734375, 0.9405778198242187, 0.9403176879882813, 0.9402091674804688, 0.9410509033203125, 0.9403566284179687, 0.9398660888671875, 0.9406289672851562, 0.9398743286132812, 0.9399254760742187, 0.9395353393554687, 0.9404968872070313, 0.9400115966796875, 0.93969091796875, 0.940168212890625, 0.9411409912109375, 0.940242919921875, 0.9399797973632813, 0.9398435668945313, 0.9398876342773438, 0.939953125, 0.94072216796875, 0.940315673828125, 0.9397380981445312, 0.9401937866210938, 0.9397545166015625, 0.9420257568359375, 0.9404149780273438, 0.94042626953125, 0.94061669921875, 0.9402767333984375, 0.9410713500976563, 0.9403862915039063, 0.9398855590820312, 0.9397760009765626, 0.940189697265625, 0.9404805297851563, 0.9394237670898438, 0.9399490356445312, 0.9412474975585937, 0.93998388671875, 0.939931640625, 0.9399439086914062, 1.9538431396484375, 0.940669921875, 0.939863037109375, 0.9412310791015625, 0.939705322265625, 0.9403146362304687, 0.9399193725585937, 0.9399849243164062, 0.940326904296875, 0.9404139404296875, 0.9400852661132812, 0.9402255249023438, 0.9408552856445312, 0.940031005859375, 0.9410283813476562, 0.940389404296875, 0.940705810546875, 0.9393633422851563, 0.9402235107421875, 0.9409392700195313, 0.941180908203125, 0.9399746704101563, 0.9405009765625, 0.9404282836914063, 0.940821533203125, 0.9403033447265625, 0.9397268676757813, 0.94030029296875, 0.9399244995117187, 0.9394667358398437, 0.9398067016601562, 0.941180908203125, 0.9402276000976563, 0.9401077880859375, 0.940368896484375, 0.9423933715820313, 0.940284912109375, 0.9403238525390625, 0.9398333740234375, 0.9402501220703126, 0.9403607177734375, 0.9407989501953125, 0.9403515014648437, 0.9402286376953125, 0.9402235107421875, 0.9409075317382812, 0.940410888671875, 0.9398958129882813, 0.9406812133789062, 0.940147705078125, 0.9399982299804688, 0.9407620849609375, 0.9411195068359375, 0.940062744140625, 0.9402501220703126, 0.9403064575195312, 0.9409515380859375, 0.939799560546875, 0.9402296142578125, 0.940310546875, 0.9400381469726562, 0.94015283203125, 0.9409638671875, 1.952026611328125, 0.9399254760742187, 0.9394933471679687, 0.9416345825195312, 0.9399859008789062, 0.9406945190429687, 0.9396664428710938, 0.940632080078125, 0.9401558837890625, 0.940590087890625, 0.94102734375, 0.940400634765625, 0.940221435546875, 0.9404784545898438, 0.940031982421875, 0.9396961059570312, 0.9399644165039063, 0.9391165161132813, 0.9401170043945313, 0.9396141967773437, 0.9402808227539062, 0.9396408081054688, 0.94097509765625, 0.9396275024414062, 0.9407098999023438, 0.939484130859375, 0.9403135986328125, 0.940037109375, 0.9396387939453125, 0.9410057983398438, 0.940732421875, 0.9403883666992188, 0.9400033569335937, 0.9398814697265625, 0.9403955078125, 0.9411707153320312, 0.940169189453125, 0.9412464599609375, 0.939968505859375, 0.9397186279296875, 0.9395138549804688, 0.9404170532226562, 0.94005859375, 0.940073974609375, 0.940353515625, 0.9408685913085938, 0.9408317260742187, 0.9399654541015625, 0.9402081298828125, 0.9396746215820313, 0.9398169555664062, 0.939715576171875, 0.9401446533203125, 0.9400780639648437, 0.9395189819335937, 0.9397626953125, 0.9406863403320312, 0.9393438720703124, 0.9411901245117188, 0.941717529296875, 0.9408173828125, 0.9400023193359375, 0.94002685546875]",tokens/s,1.0474563525692686,,,,, 
4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.3b,EleutherAI/pythia-1.3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-7b-hf,meta-llama/Llama-2-7b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1984.512,5480.382464,0.0,4833.93536,4503.282688,s,10,5.704772033691406,0.5704772033691407,0.0012601125399919193,0.5702384033203125,0.5718997436523438,0.5722093933105469,0.5724571130371093,"[0.5703561401367188, 0.57251904296875, 0.5679871215820312, 0.569779052734375, 0.569513427734375, 0.5698793334960938, 0.5701206665039062, 0.571189208984375, 0.5718309326171875, 0.5715971069335938]",tokens/s,448.7471164283303,kWh,6.715062223834757e-06,3.6795454622733808e-06,3.136956213266472e-05,4.1764169818772856e-05,tokens/kWh,6129656.140918401,MB,1986.158592,5480.382464,0.0,4833.93536,4688.699392,s,10,334.56283984375,33.456283984375,0.0052644785154184115,33.45499609375,33.46062109375,33.46519140625,33.46884765625,"[33.4560859375, 33.4513203125, 33.45960546875, 33.45390625, 33.45316015625, 33.46976171875, 33.4577109375, 33.45030078125, 33.45712109375, 33.4538671875]",tokens/s,1.883054317371969,kWh,0.0003949493847824173,0.00021646648750867824,0.0018252696453997421,0.0024366855176908373,tokens/kWh,25854.793137073728,,s,629,339.16590026855516,0.5392144678355401,0.06782032837407061,0.5309541625976563,0.5315696655273437,0.5319477294921875,1.101242978515625,"[0.5305211181640626, 0.5310648193359375, 0.5307863159179688, 0.53155224609375, 0.5311129760742187, 0.5313126220703125, 0.5308641357421875, 0.5310208129882813, 0.530787353515625, 0.5312348022460938, 0.5306480712890626, 0.5307863159179688, 0.5306419067382813, 0.5310637817382813, 0.530830322265625, 0.5309419555664062, 0.5309429931640625, 0.531325927734375, 0.5308753662109374, 0.5309122314453125, 0.5310187377929687, 0.5310218505859375, 0.5317243041992188, 0.5311610717773437, 0.5310167236328125, 0.5312348022460938, 0.530935791015625, 0.5312266235351563, 0.53100439453125, 0.5311580200195313, 0.5308682250976563, 0.5309685668945312, 0.5309716186523438, 0.5314590454101562, 0.5310986328125, 0.53151025390625, 0.5308231811523437, 0.5309214477539063, 0.5307422485351563, 0.5312634887695312, 0.5309501342773437, 0.5314273071289063, 0.530819091796875, 0.53110986328125, 0.5307781372070313, 0.53089892578125, 0.5306757202148438, 0.53097265625, 0.5308671875, 0.530881591796875, 0.5306766967773437, 0.53083544921875, 0.5309061279296875, 0.5309368286132813, 0.531083251953125, 0.5310986328125, 0.531441650390625, 0.531240966796875, 0.5315379028320313, 0.5314641723632813, 0.5310187377929687, 0.5314037475585938, 1.103942626953125, 0.5305426025390625, 0.5309706420898438, 0.530461669921875, 0.5309747314453125, 0.530756591796875, 0.5311068115234375, 0.5308692626953125, 0.5308150024414062, 0.5308845825195313, 0.5310484619140625, 0.5306859741210938, 0.5307606811523438, 0.5305077514648437, 0.5314058227539062, 0.53094091796875, 0.5307955322265625, 
0.5311867065429687, 0.53100341796875, 0.5308580322265625, 0.5310903930664063, 0.5308118896484375, 0.5307975463867187, 0.5306583251953125, 0.5308969116210938, 0.530861083984375, 0.5308999633789062, 0.5306531982421875, 0.5309398803710937, 0.5309183959960937, 0.5308845825195313, 0.530808837890625, 0.5309910888671875, 0.5313074951171874, 0.5310607299804687, 0.530740234375, 0.5311702880859375, 0.53077197265625, 0.5312318115234375, 0.5309869384765625, 0.5307576293945313, 0.5307412719726563, 0.5308344116210938, 0.5307822265625, 0.5309531860351563, 0.5305784301757812, 0.5323253784179688, 0.5312071533203125, 0.5312880859375, 0.5308795166015625, 0.5310740356445313, 0.5312593994140625, 0.5309235229492187, 0.531114990234375, 0.5311170654296875, 0.5312634887695312, 0.5309481201171875, 0.5309020385742188, 0.5311262817382812, 0.5310023803710937, 0.5309122924804688, 0.5308917236328125, 0.5311027221679687, 1.100294189453125, 0.5306101684570312, 0.530904052734375, 0.5310392456054688, 0.5307822265625, 0.530640869140625, 0.5311344604492187, 0.5307207641601562, 0.531220458984375, 0.5305763549804687, 0.530935791015625, 0.5307678833007813, 0.5314918212890625, 0.5320335083007812, 0.5323519897460938, 0.5317181396484375, 0.5310802001953125, 0.5309747314453125, 0.5312337646484375, 0.5312235717773437, 0.5314109497070313, 0.5310474243164063, 0.5315625, 0.5313208618164063, 0.5311416015625, 0.531430419921875, 0.5316690063476562, 0.5314365234375, 0.531483642578125, 0.530713623046875, 0.5309296875, 0.5309389038085938, 0.5307412719726563, 0.5305579223632813, 0.530681884765625, 0.5306705932617187, 0.5307053833007812, 0.5305855712890625, 0.5307422485351563, 0.53066650390625, 0.5308958740234375, 0.532068359375, 0.532295654296875, 0.5322301635742187, 0.5322598266601563, 0.5318870849609375, 0.5309050903320313, 0.5312890625, 0.5309685668945312, 0.5306531982421875, 0.5310576782226563, 0.530798583984375, 0.5307260131835938, 0.5306234130859375, 0.5307207641601562, 0.5306265869140625, 0.5307678833007813, 0.530714599609375, 0.5308375244140625, 0.53097265625, 0.5309849853515625, 0.5310422973632812, 0.5310628051757813, 1.1018045654296875, 0.5307658081054687, 0.5312337646484375, 0.5306675415039063, 0.5308784790039063, 0.5308600463867188, 0.5310167236328125, 0.531009521484375, 0.5312532348632812, 0.5308733520507812, 0.5311190795898437, 0.5311856689453125, 0.5314058227539062, 0.5310842895507812, 0.5315277099609375, 0.5310382080078125, 0.5311743774414063, 0.5315983276367188, 0.5321942749023437, 0.5316853637695312, 0.5308467407226563, 0.5309767456054687, 0.530850830078125, 0.5307125854492187, 0.5308641357421875, 0.5307555541992187, 0.5308436279296875, 0.5307218017578125, 0.5308006591796876, 0.5305630493164063, 0.5307371215820312, 0.530661376953125, 0.5308436889648438, 0.5309583129882812, 0.5307883911132812, 0.5307330322265625, 0.5312798461914062, 0.5310320434570313, 0.5316075439453125, 0.5309869995117188, 0.5313341674804688, 0.5305620727539062, 0.5311702880859375, 0.5308067626953125, 0.5318553466796875, 0.5307586669921875, 0.5308836059570312, 0.5307012939453125, 0.5307760620117188, 0.5307177124023438, 0.5308969116210938, 0.530735107421875, 0.5311006469726562, 0.5306951904296875, 0.5307473754882812, 0.5309122314453125, 0.5312890625, 0.5314263305664062, 0.530976806640625, 0.530935791015625, 0.530862060546875, 0.530808837890625, 0.5308170166015626, 1.100686279296875, 0.5305743408203125, 0.5307484130859375, 0.53056103515625, 0.5306112060546875, 0.5306911010742188, 0.5307637939453125, 0.5306419067382813, 0.5308395385742187, 0.530756591796875, 
0.5308426513671874, 0.5308016357421875, 0.5311057739257813, 0.5309327392578125, 0.5313269653320313, 0.5308795166015625, 0.531177490234375, 0.5310422973632812, 0.53136279296875, 0.5311201171875, 0.5312051391601562, 0.53100439453125, 0.5311467895507812, 0.5310556030273438, 0.531061767578125, 0.5310576782226563, 0.5312481079101562, 0.5311692504882812, 0.5314559936523438, 0.5308375244140625, 0.5311375122070312, 0.5306767578125, 0.530946044921875, 0.53065625, 0.5307238159179688, 0.5309081420898437, 0.5308159790039062, 0.5306972045898437, 0.53074951171875, 0.5306746215820313, 0.5307698974609375, 0.5306572875976563, 0.530976806640625, 0.5309541625976563, 0.5310791625976562, 0.531009521484375, 0.5310802001953125, 0.5311498413085938, 0.5309020385742188, 0.5308917846679687, 0.5309102172851563, 0.5309030151367188, 0.5314529418945313, 0.5309439697265625, 0.5307105102539063, 0.5309010009765625, 0.5310361328125, 0.5321871337890625, 0.5309573364257812, 0.5309470825195313, 0.5312696533203125, 0.5320478515625, 0.5322874755859375, 1.10145947265625, 0.5313740844726562, 0.5319639282226563, 0.53125732421875, 0.5318215942382812, 0.5319761962890625, 0.5323817138671875, 0.5318543090820312, 0.5324503173828125, 0.53193115234375, 0.53076171875, 0.5305753784179688, 0.5308323974609375, 0.530567138671875, 0.5308436279296875, 0.5306286010742187, 0.5307053833007812, 0.5305753784179688, 0.53100341796875, 0.5306358032226562, 0.5310361328125, 0.5309552612304688, 0.5309389038085938, 0.53064501953125, 0.53108837890625, 0.5308590087890624, 0.5309389038085938, 0.5310259399414062, 0.5312952270507812, 0.5310709838867187, 0.5311764526367188, 0.5312491455078125, 0.5314150390625, 0.5312532348632812, 0.531252197265625, 0.5313546142578125, 0.531367919921875, 0.5319331665039062, 0.5335838623046875, 0.5311641845703124, 0.531441650390625, 0.5313925170898437, 0.5322434692382813, 0.5322864379882812, 0.5321441040039062, 0.532115478515625, 0.531388427734375, 0.5306961669921875, 0.53096240234375, 0.5312880859375, 0.53085693359375, 0.5306890869140625, 0.5307586059570313, 0.53071875, 0.5307207641601562, 0.53096240234375, 0.5315604248046875, 0.5309337768554687, 0.5310587158203125, 0.5309173583984375, 0.53144677734375, 0.530819091796875, 0.5310504760742187, 1.1015126953125, 0.5308446655273438, 0.53172021484375, 0.5307381591796875, 0.5307678833007813, 0.5306972045898437, 0.5308292846679687, 0.5307166748046875, 0.530808837890625, 0.5306388549804687, 0.5306736450195313, 0.5309235229492187, 0.5309235229492187, 0.53070849609375, 0.5309276123046875, 0.5306429443359375, 0.5307852783203125, 0.5307914428710937, 0.5309736938476562, 0.5309429931640625, 0.5308538818359375, 0.530862060546875, 0.530893798828125, 0.5308037109375, 0.530777099609375, 0.5308395385742187, 0.5308600463867188, 0.5307258911132813, 0.5308436279296875, 0.530629638671875, 0.531146728515625, 0.5308661499023437, 0.531294189453125, 0.5314826049804687, 0.5312481079101562, 0.53166796875, 0.5312839965820313, 0.53075048828125, 0.530819091796875, 0.5311170654296875, 0.5316137084960938, 0.5317027587890625, 0.5319608154296875, 0.5318799438476562, 0.5317857055664063, 0.531937255859375, 0.5316761474609375, 0.5316976928710937, 0.5317744750976563, 0.5308323974609375, 0.531051513671875, 0.5311907958984375, 0.5314478149414062, 0.531346435546875, 0.5310218505859375, 0.5310238647460938, 0.5312337646484375, 0.5313167114257813, 0.5308651733398437, 0.530703369140625, 0.5307924194335938, 0.5308026733398438, 0.5309132690429688, 1.1025264892578126, 0.53065625, 0.5309061279296875, 0.5318810424804687, 
0.5307862548828125, 0.530639892578125, 0.53081396484375, 0.5309368286132813, 0.53110888671875, 0.5308538818359375, 0.5307095336914063, 0.5305200805664062, 0.5307893676757812, 0.5306808471679687, 0.530820068359375, 0.5304883422851563, 0.5308170166015626, 0.5304985961914063, 0.5307473754882812, 0.5305302734375, 0.5306531982421875, 0.530513916015625, 0.5310648193359375, 0.5307095336914063, 0.5307955322265625, 0.5309153442382812, 0.5314549560546875, 0.5309439697265625, 0.5312911376953126, 0.5316034545898437, 0.5309531860351563, 0.5309869995117188, 0.5310361328125, 0.5308016357421875, 0.5307443237304688, 0.5306705932617187, 0.5307801513671875, 0.530608154296875, 0.530639892578125, 0.5306634521484375, 0.5308221435546875, 0.5308283081054688, 0.5309696044921876, 0.5313065185546875, 0.5313300170898437, 0.5313269653320313, 0.5309522094726562, 0.531056640625, 0.5314488525390625, 0.5310975952148438, 0.5314590454101562, 0.53098291015625, 0.5310863647460937, 0.5309696044921876, 0.53103515625, 0.5311734008789063, 0.530808837890625, 0.530746337890625, 0.53139453125, 0.5307208251953125, 0.5316126708984374, 0.5313197631835938, 0.5314866943359375, 1.1037603759765624, 0.531114013671875, 0.5313157348632812, 0.5316669311523438, 0.5312061157226563, 0.5314129638671875, 0.5316454467773437, 0.531177490234375, 0.5316536865234375, 0.530905029296875, 0.5309481201171875, 0.5308477172851562, 0.5310320434570313, 0.5309378662109375, 0.5309757690429687, 0.5313228759765625, 0.5311948852539062, 0.5310637817382813, 0.5309552612304688, 0.5314559936523438, 0.5309481201171875, 0.5310003051757812, 0.5309910888671875, 0.5306224365234375, 0.5306593017578125, 0.5306255493164063, 0.5308753662109374, 0.5307627563476562, 0.5307576293945313, 0.530713623046875, 0.53089892578125, 0.530850830078125, 0.5307238159179688, 0.5306982421875, 0.5311590576171875, 0.530724853515625, 0.5312921752929688, 0.5312542724609375, 0.5312317504882812, 0.5316403198242188, 0.5309900512695312, 0.5310330810546875, 0.5311273193359375, 0.5309204711914063, 0.53094091796875, 0.5306009521484375, 0.530967529296875, 0.5310392456054688, 0.5308815307617187, 0.5307689208984375, 0.5308272705078125, 0.5306911010742188, 0.5308804931640625, 0.5311528930664062, 0.5310679321289062, 0.5312716674804687, 0.531794921875, 0.531114013671875, 0.5313935546875, 0.5312348022460938, 0.5312020263671875, 0.5310812377929688, 0.5309931640625, 1.102993408203125, 0.5305927124023437, 0.53087744140625, 0.530703369140625, 0.5310504760742187, 0.5310187377929687, 0.5312553100585937, 0.5311918334960938, 0.5312839965820313, 0.5307781372070313, 0.5309859619140626, 0.5311580200195313, 0.5308999633789062, 0.5308969116210938, 0.5311795043945312, 0.5306071166992188, 0.5307647705078125, 0.5309890747070313, 0.5307586669921875, 0.5306500854492188, 0.530893798828125, 0.530766845703125, 0.5308999633789062, 0.5307760620117188, 0.5306542358398437, 0.530555908203125, 0.5319547119140625, 0.53079345703125, 0.5315061645507813, 0.5311846313476563, 0.5316218872070313, 0.5312348022460938, 0.5321912231445313, 0.5312000122070313, 0.531431396484375, 0.5311488037109375, 0.5308743896484375, 0.530819091796875, 0.5310812377929688, 0.5307105102539063, 0.53081396484375, 0.5309736938476562, 0.53100439453125, 0.5308211059570312, 0.5308876953125, 0.5309081420898437, 0.5309849853515625, 0.5307340698242188, 0.53097265625, 0.5311795043945312, 0.5309010009765625, 0.5311529541015625, 0.5311456909179687, 0.5306522216796875, 0.5312962036132812, 0.5308641357421875, 0.5309163818359375, 0.5308590087890624, 0.5311928100585938, 
0.5311190795898437, 0.5309706420898438, 0.530935791015625, 0.531294189453125]",tokens/s,1.8545496451794004,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciLM-7B,Deci/DeciLM-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 559, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3704, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1490, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1656, in _check_and_enable_sdpa raise ValueError( ValueError: DeciLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-0.5B,Qwen/Qwen1.5-0.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1441.677312,1709.703168,0.0,1063.256064,942.605312,s,10,0.8730960388183594,0.08730960388183594,0.002035304872210584,0.08713673782348633,0.08910322875976563,0.09054942092895508,0.09170637466430664,"[0.09199561309814454, 0.0843604507446289, 0.08696444702148437, 0.08719612884521484, 0.08721517181396485, 0.08707734680175781, 0.08465267181396484, 0.0866138916015625, 0.08823846435546875, 0.08878185272216797]",tokens/s,2932.0943930345643,kWh,1.0099359104323139e-06,5.533989598310048e-07,2.5805410008034e-06,4.143875871066718e-06,tokens/kWh,61777912.265045814,MB,1441.677312,1709.703168,0.0,1063.256064,942.607872,s,10,53.98236181640625,5.398236181640624,0.0409841937132303,5.390563720703125,5.449904833984375,5.452959252929687,5.455402788085937,"[5.38043359375, 5.40069384765625, 5.41887353515625, 5.44691552734375, 5.456013671875, 5.44922607421875, 5.37513134765625, 5.35530859375, 5.36842431640625, 5.33134130859375]",tokens/s,11.670478630457612,kWh,6.202919682701882e-05,3.3993588802392576e-05,0.00015045469253839705,0.00024647747816780845,tokens/kWh,255601.4467054386,,s,629,54.67969024658198,0.08693114506610816,0.010438416792585154,0.08622489929199219,0.08683417816162109,0.08736747589111328,0.17189485961914075,"[0.08353689575195312, 0.08669286346435547, 0.08684236907958984, 0.08664883422851563, 0.08732160186767578, 0.0871107177734375, 0.08675321960449218, 0.08646348571777343, 0.08578867340087891, 0.08561151885986328, 0.08533920288085937, 0.08318355560302734, 0.08327986907958984, 0.08331366729736328, 0.08347750091552734, 0.08323891448974609, 0.08337407684326172, 0.08320102691650391, 0.08308223724365234, 0.08320921325683593, 0.08336589050292968, 0.08362290954589843, 0.08334028625488281, 0.08379084777832031, 0.08326246643066407, 0.08351641845703126, 0.08637849426269531, 0.0860948486328125, 0.08636211395263672, 0.0862208023071289, 0.0863825912475586, 0.08643379211425781, 0.08343961334228515, 0.08492339324951172, 0.08729497528076172, 0.08630579376220703, 0.08625049591064453, 0.08637340545654297, 0.08612451171875, 0.08631295776367187, 0.0867215347290039, 0.08637235260009765, 0.08630169677734376, 0.08631193542480468, 0.08447795104980468, 0.08495410919189453, 0.08645938873291016, 0.08668978881835937, 0.08635699462890625, 0.08412057495117188, 0.0861143035888672, 0.08629350280761719, 0.08610099029541016, 0.0859535369873047, 0.086508544921875, 0.08645426940917969, 0.0861286392211914, 0.08638976287841797, 0.08641228485107422, 0.08600883483886719, 0.08566067504882813, 0.08624947357177734, 0.16856166076660156, 0.08411341094970703, 0.0832890853881836, 0.08320819091796874, 0.08320409393310547, 0.08308633422851562, 0.08329011535644532, 0.08373766326904297, 0.08319993591308594, 0.08375193786621093, 
0.08421683502197265, 0.08632524871826172, 0.0862371826171875, 0.08634674835205078, 0.08365670776367187, 0.0833986587524414, 0.08349798583984375, 0.08350003051757812, 0.08335769653320313, 0.08395059204101563, 0.0839208984375, 0.08677375793457032, 0.0867154541015625, 0.08665900421142578, 0.0864194564819336, 0.08657305908203125, 0.08616550445556641, 0.08598429107666015, 0.08726729583740235, 0.08673792266845703, 0.08624230194091796, 0.0869222412109375, 0.08644096374511719, 0.0866344985961914, 0.08689356994628906, 0.08668672180175781, 0.08644608306884766, 0.08659046173095702, 0.08682291412353516, 0.08598323059082032, 0.08748851013183594, 0.08676249694824219, 0.08683213043212891, 0.08647679901123047, 0.08678195190429687, 0.08769843292236328, 0.08739942169189453, 0.08640614318847656, 0.08639897918701171, 0.08644403076171875, 0.08633344268798829, 0.08639488220214844, 0.08622694396972656, 0.0867215347290039, 0.0865638427734375, 0.08646451568603515, 0.08639078521728516, 0.08660889434814453, 0.08628224182128906, 0.08633753967285156, 0.08671437072753906, 0.08621875, 0.08635084533691406, 0.17366220092773438, 0.08666828918457031, 0.08657817840576172, 0.08662118530273437, 0.08642969512939454, 0.08634265899658203, 0.0862525405883789, 0.08606208038330078, 0.08631501007080078, 0.08632012939453125, 0.08620851135253907, 0.08628530883789062, 0.08648499298095703, 0.08680038452148438, 0.08667135620117188, 0.08662732696533203, 0.08640409851074218, 0.08657817840576172, 0.08667033386230469, 0.08634060668945312, 0.08654131317138672, 0.08610816192626954, 0.08561766052246093, 0.08718950653076171, 0.08702668762207032, 0.08669388580322265, 0.08728883361816406, 0.08666214752197265, 0.08715264129638672, 0.08645017242431641, 0.08640409851074218, 0.08617984008789062, 0.08535756683349609, 0.0866324462890625, 0.08659967803955078, 0.08661299133300782, 0.08663346862792969, 0.08641843414306641, 0.08643379211425781, 0.08636006164550782, 0.08653209686279296, 0.0865771484375, 0.08655257415771485, 0.08642047882080078, 0.08629657745361329, 0.08657100677490234, 0.0862955551147461, 0.08327884674072265, 0.08376831817626954, 0.08357273864746094, 0.08349286651611328, 0.08337715148925781, 0.0860549087524414, 0.08430796813964844, 0.08347443389892578, 0.08344064331054687, 0.08356454467773437, 0.08309248352050781, 0.08637235260009765, 0.08680242919921875, 0.0863078384399414, 0.08653311920166015, 0.08646041870117188, 0.1741107177734375, 0.08636214447021484, 0.08619312286376953, 0.08634162902832031, 0.08638976287841797, 0.08647885131835938, 0.08649215698242188, 0.08664064025878906, 0.08642771148681641, 0.08624428558349609, 0.08617369842529297, 0.08573542022705079, 0.08636109161376954, 0.08620134735107422, 0.086329345703125, 0.08637644958496093, 0.08652902221679687, 0.08682086181640625, 0.08738508605957031, 0.0865249252319336, 0.0863815689086914, 0.0862894058227539, 0.08674918365478515, 0.08637337493896484, 0.08735027313232421, 0.08632422637939453, 0.08617984008789062, 0.08628428649902344, 0.08778956604003907, 0.08680754852294922, 0.08660582733154297, 0.08646348571777343, 0.08672358703613281, 0.0874486083984375, 0.08708297729492187, 0.08639078521728516, 0.08637337493896484, 0.08624742126464843, 0.08626278686523438, 0.08637651062011718, 0.08677881622314453, 0.08631501007080078, 0.08682803344726563, 0.0861112289428711, 0.08643583679199218, 0.08626380920410157, 0.08634572601318359, 0.086181884765625, 0.08633036804199219, 0.08633856201171874, 0.08504627227783203, 0.08628121948242187, 0.08638771057128906, 0.08684748840332031, 0.0863825912475586, 
0.08656588745117187, 0.08662528228759765, 0.08654131317138672, 0.08605388641357421, 0.08622694396972656, 0.0862955551147461, 0.08632524871826172, 0.08606412506103515, 0.1738434600830078, 0.0866170883178711, 0.08668978881835937, 0.0868823013305664, 0.08633548736572266, 0.08676761627197266, 0.08665599822998046, 0.08651468658447266, 0.08632012939453125, 0.08640306854248046, 0.08641433715820312, 0.0875315170288086, 0.08692838287353516, 0.0863477783203125, 0.0873861083984375, 0.08702054595947266, 0.08652390289306641, 0.08673382568359375, 0.08648191833496094, 0.0868485107421875, 0.08659967803955078, 0.0865249252319336, 0.08798515319824218, 0.08668672180175781, 0.0869713897705078, 0.08641024017333984, 0.08619417572021484, 0.08626278686523438, 0.08664371490478516, 0.0865577621459961, 0.08645420837402344, 0.08650342559814453, 0.08614502716064452, 0.0860057601928711, 0.08658636474609376, 0.08687513732910156, 0.08634572601318359, 0.08673484802246094, 0.086614013671875, 0.08654745483398438, 0.08647782135009766, 0.08652288055419922, 0.08698777770996094, 0.08646348571777343, 0.08669286346435547, 0.08713728332519531, 0.08633344268798829, 0.08693965148925781, 0.0867000961303711, 0.08665286254882812, 0.08626483154296875, 0.0867430419921875, 0.08634982299804687, 0.08623616027832032, 0.0861685791015625, 0.08641228485107422, 0.08606719970703125, 0.08660889434814453, 0.08628736114501953, 0.08668057250976563, 0.08639794921875, 0.08680242919921875, 0.08641741180419922, 0.1737769012451172, 0.08605696105957031, 0.08600371551513672, 0.08643071746826173, 0.08781107330322266, 0.0866519012451172, 0.08646246337890626, 0.08642668914794922, 0.08613164520263672, 0.08653823852539062, 0.08642047882080078, 0.0866324462890625, 0.08596377563476562, 0.0863815689086914, 0.08631398773193359, 0.08695295715332031, 0.08793907165527344, 0.08786227416992187, 0.08662630462646484, 0.08779571533203125, 0.08724275207519532, 0.08729190063476562, 0.0877844467163086, 0.08636006164550782, 0.0858818588256836, 0.08638771057128906, 0.086949951171875, 0.08719251251220703, 0.08616550445556641, 0.08660377502441406, 0.08642457580566407, 0.08629452514648438, 0.0865054702758789, 0.08606105804443359, 0.08653311920166015, 0.08622796630859375, 0.0860200958251953, 0.08642253112792969, 0.08642355346679688, 0.0863641586303711, 0.08620851135253907, 0.0867583999633789, 0.08634880065917969, 0.08620851135253907, 0.08670003509521484, 0.08732057952880859, 0.08644608306884766, 0.08616960144042969, 0.08388813018798828, 0.08584806060791016, 0.08618905639648437, 0.0861788787841797, 0.0874474868774414, 0.08617574310302735, 0.08607027435302735, 0.0862064666748047, 0.08648703765869141, 0.08633241271972657, 0.08608153533935547, 0.08628838348388672, 0.08599142456054687, 0.08555622100830078, 0.08707379150390625, 0.1688248291015625, 0.08354815673828125, 0.08360345458984375, 0.08642150115966797, 0.08649830627441406, 0.0861808624267578, 0.08627609252929687, 0.08619622039794922, 0.0862033920288086, 0.08743424224853516, 0.08643276977539062, 0.08662425231933593, 0.08616754913330078, 0.08617164611816407, 0.0859135971069336, 0.08686489868164063, 0.08547532653808594, 0.08608972930908203, 0.08635596466064453, 0.08650649261474609, 0.08669593811035156, 0.0856258544921875, 0.08557164764404297, 0.08602413177490234, 0.08610304260253906, 0.08613478088378906, 0.0858818588256836, 0.08467046356201172, 0.08328396606445312, 0.08320614624023437, 0.08348467254638672, 0.0833433609008789, 0.08343142700195312, 0.08517120361328125, 0.08331775665283203, 0.0835225601196289, 0.08325939178466797, 
0.0829675521850586, 0.08333618927001953, 0.0835962905883789, 0.08292864227294922, 0.08303923034667969, 0.0840273895263672, 0.08592588806152343, 0.08553369903564453, 0.08563097381591797, 0.08609180450439453, 0.08550192260742187, 0.08622489929199219, 0.08613069152832031, 0.08599961853027344, 0.08535244750976563, 0.0853544921875, 0.08618495941162109, 0.08482406616210937, 0.08524288177490234, 0.08417894744873047, 0.08835072326660157, 0.08645426940917969, 0.08594847869873047, 0.08594425964355469, 0.08639794921875, 0.0862003173828125, 0.17308876037597656, 0.0861470718383789, 0.08535763549804687, 0.08573228454589844, 0.08777318572998047, 0.0860579833984375, 0.08572621154785157, 0.086181884765625, 0.08619213104248047, 0.08773222351074218, 0.08648397064208985, 0.08551526641845703, 0.08338022613525391, 0.08344371032714844, 0.08378880310058594, 0.08320511627197266, 0.08437350463867188, 0.08477490997314453, 0.08600780487060547, 0.08639385223388672, 0.08647065734863281, 0.08623411560058594, 0.08597503662109375, 0.08415744018554687, 0.08344166564941406, 0.08589516448974609, 0.08594534301757813, 0.08557977294921874, 0.08637545776367188, 0.08613065338134765, 0.08599756622314453, 0.08644300842285156, 0.08653823852539062, 0.08349593353271484, 0.08332083129882813, 0.08336589050292968, 0.08353075408935547, 0.08340172576904296, 0.08323072052001954, 0.08343756866455078, 0.08349286651611328, 0.0832890853881836, 0.0835041275024414, 0.08353897857666015, 0.08367203521728515, 0.08369664001464844, 0.08330963134765625, 0.08333510589599609, 0.08366182708740234, 0.08354617309570313, 0.08324396514892578, 0.08303308868408203, 0.08764422607421875, 0.084233154296875, 0.08548761749267578, 0.08656588745117187, 0.08594841766357422, 0.08611126708984375, 0.08648700714111328, 0.08606719970703125, 0.08635289764404297, 0.08466329956054687, 0.08477286529541016, 0.17457868957519532, 0.08449132537841797, 0.08596371459960937, 0.08606515502929687, 0.08642355346679688, 0.08605696105957031, 0.08621673583984375, 0.08608969879150391, 0.08441241455078125, 0.08336179351806641, 0.08336179351806641, 0.08567501068115234, 0.08582860565185547, 0.083346435546875, 0.08477388763427735, 0.0841707534790039, 0.08664268493652344, 0.08617881774902343, 0.0860057601928711, 0.08586239624023438, 0.08551219177246094, 0.08600678253173828, 0.08574361419677734, 0.08587980651855469, 0.0835389404296875, 0.08575692749023438, 0.08598118591308594, 0.085970947265625, 0.08765235137939453, 0.08452095794677734, 0.08327577972412109, 0.08366387176513672, 0.08551423645019532, 0.08583475494384765, 0.08568831634521484, 0.08532889556884765, 0.0860057601928711, 0.08593408203125, 0.08623308563232422, 0.08616754913330078, 0.08630989074707031, 0.08582454681396484, 0.08615420532226563, 0.08506265258789063, 0.08292249298095702, 0.08326246643066407, 0.08425062561035156, 0.08737894439697266, 0.08324813079833984, 0.08346521759033203, 0.08319385528564453, 0.08310169219970703, 0.08324198150634765, 0.08323072052001954, 0.08317030334472657, 0.08586962890625, 0.08609273529052734, 0.08769741058349609, 0.08657305908203125, 0.08599244689941406, 0.08583679962158203, 0.08583782196044921, 0.08327680206298828, 0.17533644104003906, 0.08587776184082031, 0.08605081939697265, 0.0857548828125, 0.08574873352050781, 0.0859535369873047, 0.08613581085205078, 0.08587161254882812, 0.08592998504638671, 0.08673795318603515, 0.08629654693603515, 0.08595455932617188, 0.08510157012939454, 0.08576204681396485, 0.08615424346923828, 0.08628736114501953, 0.08378880310058594, 0.08452607727050782, 0.08607129669189453, 
0.0862586898803711, 0.08617062377929688, 0.08422502136230468, 0.08376422119140625, 0.0832890853881836, 0.08342630767822265, 0.0846397476196289, 0.08346630096435546, 0.08343545532226562, 0.08319590759277344, 0.08344371032714844, 0.08367001342773438, 0.0833259506225586, 0.0832511978149414, 0.08327372741699218, 0.0835389404296875, 0.0833966064453125, 0.08333004760742188, 0.0835758056640625, 0.08591462707519532, 0.08338739013671875, 0.08385945892333985, 0.08679219055175781, 0.0835594253540039, 0.08360345458984375, 0.08362598419189453, 0.08332288360595703, 0.08334137725830078, 0.08309037017822266, 0.08337209320068359, 0.08316102600097657, 0.08313855743408204, 0.08323993682861328, 0.08327168273925781, 0.08664064025878906, 0.08615526580810547, 0.08606719970703125, 0.08610099029541016, 0.08608665466308593, 0.08620543670654297, 0.0832573471069336, 0.08329011535644532, 0.08356147003173828, 0.08407449340820312]",tokens/s,11.50335704469939,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,0,0,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/0/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, 
timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3402-5c03f64f79a16e6271cb102c;0d3402a5-0bae-4a34-9458-c71a3e1e43bf) Repository Not Found for url: https://huggingface.co/0/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 0 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-70b-hf,meta-llama/Llama-2-70b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 
7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,2,2,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/2/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a32a7-300eba55236c2c38412ba91c;277abb9c-504f-4052-a109-d46f0e0cf882) Repository Not Found for url: https://huggingface.co/2/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 2 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-2.7b,facebook/opt-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in 
run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,l,l,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/l/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a30f6-359e8e8c18ca994074951b22;b281d135-2c7f-403d-975c-c4a7feee76c4) Repository Not Found for url: https://huggingface.co/l/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: l is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Meta-Llama-3-70B,meta-llama/Meta-Llama-3-70B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights 
self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-32B,Qwen/Qwen1.5-32B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,4461.907968,24111.480832,0.0,23465.033728,21690.932224,s,10,26.134937744140625,2.6134937744140623,0.002110944060506743,2.6131138916015626,2.6165289794921875,2.616893395996094,2.617184929199219,"[2.61349365234375, 2.615531005859375, 2.611662353515625, 2.612973388671875, 2.612295654296875, 2.611193115234375, 2.61325439453125, 2.616447998046875, 2.6172578125, 2.610828369140625]",tokens/s,97.95317000798843,kWh,3.0828719950384564e-05,1.6895240036155882e-05,0.0001496905919745939,0.00019741455196113433,tokens/kWh,1296763.5741989252,MB,4461.907968,24111.480832,0.0,23465.033728,21890.217984,s,10,1550.2514375,155.02514375,0.01758597574684522,155.027953125,155.04441406249998,155.04866796875,155.05207109375002,"[155.040484375, 155.052921875, 155.004375, 155.03190625, 155.025, 155.03090625, 155.00465625, 154.996828125, 155.020890625, 155.04346875]",tokens/s,0.4063856899342497,kWh,0.0018302277258535225,0.001003126846851137,0.008909165071770808,0.01174251964447547,tokens/kWh,5365.117701092351,,s,629,1571.3079760742191,2.498104890420062,0.3103664945461323,2.460632080078125,2.46187294921875,2.4623572265625002,5.0725159374999995,"[2.460918701171875, 2.46082666015625, 2.461980712890625, 2.4610048828125, 2.460906494140625, 
2.46040576171875, 2.46205126953125, 2.4613447265625, 2.461484130859375, 2.4613662109375, 2.461151123046875, 2.461115478515625, 2.460440673828125, 2.460030029296875, 2.461190185546875, 2.460876708984375, 2.460451904296875, 2.460170166015625, 2.463140869140625, 2.46112060546875, 2.46163671875, 2.46084814453125, 2.463774658203125, 2.461010986328125, 2.4610498046875, 2.46070263671875, 2.461833251953125, 2.460528564453125, 2.4617880859375, 2.46072021484375, 2.461929443359375, 2.4613662109375, 2.46120849609375, 2.46112060546875, 2.4614892578125, 2.459990966796875, 2.461517822265625, 2.4609638671875, 2.46169189453125, 2.460205078125, 2.460928955078125, 2.461189208984375, 2.459634765625, 2.45965625, 2.45981396484375, 2.459525146484375, 2.459683837890625, 2.46053369140625, 2.46042529296875, 2.459734130859375, 2.461192138671875, 2.46074072265625, 2.46057470703125, 2.46108154296875, 2.46078564453125, 2.46036474609375, 2.461035400390625, 2.4617646484375, 2.460655517578125, 2.461253662109375, 2.461116455078125, 2.46118505859375, 5.0779052734375, 2.461738037109375, 2.461781982421875, 2.461154296875, 2.463372314453125, 2.462037109375, 2.462613525390625, 2.462189453125, 2.46213427734375, 2.46221923828125, 2.462265380859375, 2.462834716796875, 2.461442138671875, 2.460442626953125, 2.460788818359375, 2.460884033203125, 2.46097509765625, 2.461346923828125, 2.46034130859375, 2.460333984375, 2.46076318359375, 2.461075439453125, 2.460369873046875, 2.459978759765625, 2.4613203125, 2.461739013671875, 2.460712890625, 2.461740966796875, 2.46230322265625, 2.462064697265625, 2.4611767578125, 2.4604365234375, 2.46226318359375, 2.46073046875, 2.461908935546875, 2.4604140625, 2.4613857421875, 2.460633056640625, 2.46066796875, 2.459797607421875, 2.460780517578125, 2.46072119140625, 2.46026025390625, 2.4598271484375, 2.46049169921875, 2.46009765625, 2.460492919921875, 2.46042529296875, 2.461200439453125, 2.461295654296875, 2.460458984375, 2.461582275390625, 2.46042822265625, 2.460832763671875, 2.461412353515625, 2.460978271484375, 2.460579833984375, 2.4606044921875, 2.460789794921875, 2.460420166015625, 2.4605654296875, 2.461767578125, 2.46121875, 5.0727412109375, 2.4607958984375, 2.46120751953125, 2.4612392578125, 2.460706787109375, 2.46135595703125, 2.46187109375, 2.461371337890625, 2.4605634765625, 2.46158642578125, 2.46133251953125, 2.46209326171875, 2.46131103515625, 2.461284423828125, 2.46093212890625, 2.46143798828125, 2.460158935546875, 2.460937255859375, 2.459987060546875, 2.4602919921875, 2.460261474609375, 2.460375, 2.460521484375, 2.461273193359375, 2.46073046875, 2.46131396484375, 2.460286865234375, 2.46070263671875, 2.459401123046875, 2.460560302734375, 2.460210205078125, 2.4605859375, 2.459334716796875, 2.46042822265625, 2.460444580078125, 2.460652587890625, 2.4594482421875, 2.459885498046875, 2.459210693359375, 2.459854736328125, 2.45916162109375, 2.459714599609375, 2.45982421875, 2.45997265625, 2.460665771484375, 2.461526123046875, 2.459255859375, 2.4603525390625, 2.460240966796875, 2.460114990234375, 2.459881591796875, 2.46009765625, 2.459779052734375, 2.45985791015625, 2.459809814453125, 2.459896728515625, 2.459599853515625, 2.459850830078125, 2.459740234375, 2.459671630859375, 2.459470947265625, 2.45933984375, 2.4605419921875, 5.07296044921875, 2.459979736328125, 2.460074951171875, 2.460875732421875, 2.46111328125, 2.461024169921875, 2.4617041015625, 2.45998583984375, 2.461939697265625, 2.4617861328125, 2.46111328125, 2.460988525390625, 2.460284912109375, 2.460103759765625, 2.4607939453125, 
2.461890625, 2.46074072265625, 2.4606279296875, 2.46205224609375, 2.46086865234375, 2.461393798828125, 2.461231201171875, 2.462035888671875, 2.4613251953125, 2.462630859375, 2.460718017578125, 2.46099853515625, 2.460675048828125, 2.4612158203125, 2.460883056640625, 2.461149169921875, 2.461590576171875, 2.4607939453125, 2.460654541015625, 2.460872802734375, 2.461231201171875, 2.46080712890625, 2.461241455078125, 2.46087158203125, 2.459632568359375, 2.460600341796875, 2.460632080078125, 2.46099365234375, 2.460303466796875, 2.46082568359375, 2.462000244140625, 2.461948974609375, 2.4596806640625, 2.460571533203125, 2.460125244140625, 2.46019677734375, 2.45987841796875, 2.46080810546875, 2.460212158203125, 2.460813232421875, 2.45957421875, 2.46019384765625, 2.460409912109375, 2.459958251953125, 2.45973095703125, 2.460412841796875, 2.46169384765625, 2.46048046875, 5.07272607421875, 2.461116455078125, 2.460166259765625, 2.461013916015625, 2.460505126953125, 2.46078662109375, 2.461305908203125, 2.4609658203125, 2.460851318359375, 2.461484130859375, 2.4605869140625, 2.4610498046875, 2.4601630859375, 2.46051318359375, 2.4606064453125, 2.461643798828125, 2.461190185546875, 2.461765625, 2.46075390625, 2.4614091796875, 2.460251220703125, 2.4604609375, 2.460229736328125, 2.460904541015625, 2.46035546875, 2.460232666015625, 2.46175439453125, 2.462803955078125, 2.4607958984375, 2.461580322265625, 2.461107177734375, 2.461393798828125, 2.460929931640625, 2.460676025390625, 2.461404052734375, 2.460982177734375, 2.460367919921875, 2.4600791015625, 2.4596552734375, 2.460010498046875, 2.45901318359375, 2.4592802734375, 2.46055224609375, 2.460517333984375, 2.459428955078125, 2.46019384765625, 2.460880859375, 2.4609208984375, 2.459928466796875, 2.46034423828125, 2.460252197265625, 2.460788818359375, 2.460273681640625, 2.45994287109375, 2.460907470703125, 2.4596796875, 2.460264404296875, 2.460949462890625, 2.460282958984375, 2.4613427734375, 2.462763916015625, 2.461613037109375, 2.460632080078125, 5.0719755859375, 2.462064697265625, 2.46147998046875, 2.46153515625, 2.461073486328125, 2.460905517578125, 2.461240234375, 2.460853271484375, 2.461421630859375, 2.462041015625, 2.46237890625, 2.46096484375, 2.461697998046875, 2.4613037109375, 2.461698974609375, 2.460219482421875, 2.462074951171875, 2.460283935546875, 2.459872314453125, 2.46007080078125, 2.45981689453125, 2.460726318359375, 2.459724853515625, 2.45985888671875, 2.460906494140625, 2.46042822265625, 2.46017626953125, 2.460051513671875, 2.46018359375, 2.46103759765625, 2.460602294921875, 2.46068212890625, 2.461318115234375, 2.460810302734375, 2.4603740234375, 2.46188037109375, 2.462437255859375, 2.460948486328125, 2.460538818359375, 2.460599365234375, 2.461042724609375, 2.46105712890625, 2.45985595703125, 2.46139697265625, 2.46086865234375, 2.460706787109375, 2.46046826171875, 2.4603740234375, 2.461024169921875, 2.4599951171875, 2.460008544921875, 2.460411865234375, 2.4604580078125, 2.460127197265625, 2.4596591796875, 2.460818359375, 2.461365234375, 2.460373046875, 2.46135302734375, 2.461212646484375, 2.4607734375, 2.460324951171875, 2.46021728515625, 5.07445458984375, 2.4605234375, 2.461729736328125, 2.461869140625, 2.461232177734375, 2.461845458984375, 2.463498291015625, 2.460968994140625, 2.46013232421875, 2.460211181640625, 2.45943603515625, 2.460124267578125, 2.46013037109375, 2.460103759765625, 2.461614013671875, 2.46322900390625, 2.461684814453125, 2.4606064453125, 2.4600556640625, 2.45979345703125, 2.459621337890625, 2.460230712890625, 
2.459693115234375, 2.460508056640625, 2.460240966796875, 2.460556396484375, 2.459707275390625, 2.459610107421875, 2.459715576171875, 2.460527587890625, 2.459706298828125, 2.45965625, 2.45981591796875, 2.45966845703125, 2.4598037109375, 2.459366455078125, 2.4594267578125, 2.45954052734375, 2.46019580078125, 2.459505615234375, 2.4598701171875, 2.4605830078125, 2.46051953125, 2.4611103515625, 2.460297119140625, 2.45998388671875, 2.459361328125, 2.460695556640625, 2.46107958984375, 2.46091162109375, 2.45956494140625, 2.460255126953125, 2.460651611328125, 2.460200927734375, 2.45922314453125, 2.460324951171875, 2.460303466796875, 2.46060546875, 2.460147705078125, 2.459874267578125, 2.459558837890625, 2.460209228515625, 2.46225927734375, 5.07727880859375, 2.46051953125, 2.459748291015625, 2.460139404296875, 2.459675537109375, 2.460464111328125, 2.45935400390625, 2.460265380859375, 2.460979248046875, 2.46025927734375, 2.460180419921875, 2.45945654296875, 2.45956396484375, 2.4599091796875, 2.459864990234375, 2.459249755859375, 2.459658203125, 2.45960693359375, 2.4602666015625, 2.463476806640625, 2.46247314453125, 2.461254638671875, 2.4606064453125, 2.46013134765625, 2.4603095703125, 2.46048046875, 2.460453857421875, 2.460747802734375, 2.4610087890625, 2.45964501953125, 2.460734375, 2.460478515625, 2.460600341796875, 2.460285888671875, 2.460894287109375, 2.460316650390625, 2.460169189453125, 2.460146728515625, 2.459640869140625, 2.459826171875, 2.459534423828125, 2.46048876953125, 2.459629638671875, 2.459568115234375, 2.459454345703125, 2.46009765625, 2.46054296875, 2.46160693359375, 2.459303955078125, 2.459570068359375, 2.460251220703125, 2.459474853515625, 2.45965625, 2.460379150390625, 2.45972900390625, 2.459229248046875, 2.459660400390625, 2.45932958984375, 2.464058349609375, 2.461569091796875, 2.45967041015625, 2.459884521484375, 2.460506103515625, 5.07673388671875, 2.459622314453125, 2.462644287109375, 2.4620205078125, 2.461388916015625, 2.460607421875, 2.461044677734375, 2.461013916015625, 2.461294677734375, 2.460347412109375, 2.46035546875, 2.4605205078125, 2.460373046875, 2.459558837890625, 2.46158740234375, 2.462236572265625, 2.462750732421875, 2.46039453125, 2.459875244140625, 2.460180419921875, 2.459242431640625, 2.461361083984375, 2.461865966796875, 2.4626064453125, 2.46232470703125, 2.462096435546875, 2.460041259765625, 2.4603125, 2.460031005859375, 2.460012451171875, 2.460483642578125, 2.46249365234375, 2.460971923828125, 2.460064697265625, 2.460525634765625, 2.4618515625, 2.461947998046875, 2.46048046875, 2.460482666015625, 2.459716552734375, 2.46003515625, 2.45914111328125, 2.459892822265625, 2.4607099609375, 2.461216796875, 2.45960693359375, 2.459496337890625, 2.459989013671875, 2.46011181640625, 2.461148193359375, 2.459788330078125, 2.459845703125, 2.45964501953125, 2.45973291015625, 2.459387939453125, 2.460958740234375, 2.4599326171875, 2.459988037109375, 2.459134033203125, 2.4599716796875, 2.459010009765625, 2.4597216796875, 2.466093017578125, 5.07026025390625, 2.459715576171875, 2.459428955078125, 2.460008544921875, 2.45975244140625, 2.460271728515625, 2.4604365234375, 2.46042724609375, 2.46135595703125, 2.460927001953125, 2.460580810546875, 2.46221826171875, 2.461213623046875, 2.461199462890625, 2.460501953125, 2.461664306640625, 2.46034423828125, 2.460695556640625, 2.460241943359375, 2.46086865234375, 2.4613837890625, 2.4614892578125, 2.460771240234375, 2.461020263671875, 2.462573486328125, 2.461728759765625, 2.461054931640625, 2.46047021484375, 2.46080712890625, 
2.46276806640625, 2.46089306640625, 2.461909912109375, 2.46152392578125, 2.4612607421875, 2.460707763671875, 2.461158447265625, 2.46102734375, 2.46084716796875, 2.460749755859375, 2.460251220703125, 2.460735595703125, 2.46051220703125, 2.461053955078125, 2.4607548828125, 2.46175244140625, 2.460388427734375, 2.46027880859375, 2.4614501953125, 2.461758544921875, 2.461529052734375, 2.460019775390625, 2.4599306640625, 2.460873779296875, 2.460814453125, 2.460541015625, 2.460760009765625, 2.46168994140625, 2.460673095703125, 2.46126806640625, 2.460421142578125, 2.462738525390625, 2.46156689453125, 2.461497314453125]",tokens/s,0.4003034475593408,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-72B,Qwen/Qwen2-beta-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line 
repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,s,s,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/s/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2c2e-34b2d4de1cbd77f955c402fe;b915ab57-fc96-41eb-8e41-f0efcf5b53d3) Repository Not Found for url: https://huggingface.co/s/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: s is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,B,B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File 
""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/B/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a32fd-14868ab80bff2e2a641dfed5;640a9213-fa17-4501-ab68-562c3305e81b) Repository Not Found for url: https://huggingface.co/B/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: B is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-14B,Qwen/Qwen2-beta-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,3934.998528,12732.33408,0.0,12085.886976,11337.370624,s,10,10.913714233398439,1.0913714233398437,0.0019187724556758634,1.0909278564453126,1.0936456420898437,1.0942043518066407,1.0946513195800782,"[1.0947630615234376, 1.093521484375, 1.0894732666015625, 1.0895545654296874, 1.089643798828125, 1.0893114013671874, 1.0899912109375, 1.091864501953125, 1.093216552734375, 1.0923743896484375]",tokens/s,234.56725595451456,kWh,1.2859468128946092e-05,7.046563530275307e-06,6.250710556117145e-05,8.241313722039284e-05,tokens/kWh,3106300.871854854,MB,3934.998528,12732.33408,0.0,12085.886976,11686.804992,s,10,636.61697265625,63.661697265625,0.007269593222884917,63.66015234375,63.672558203125,63.6732771484375,63.6738523046875,"[63.6561171875, 63.66687109375, 63.65788671875, 63.66241796875, 63.65402734375, 63.67399609375, 63.6723984375, 63.656875, 63.65174609375, 
63.66463671875]",tokens/s,0.9896060379467405,kWh,0.0007518513685133722,0.0004120810364553653,0.003630345543163038,0.004794277948131776,tokens/kWh,13140.664909624129,,s,629,645.425664550782,1.0261139341029908,0.12959104210993952,1.0104422607421875,1.0109677490234374,1.0113603271484375,2.100010478515625,"[1.0103787231445311, 1.0104750366210937, 1.010914306640625, 1.0098585815429688, 1.0104279174804687, 1.010324462890625, 1.0105446166992187, 1.010070556640625, 1.0100776977539063, 1.0105692138671876, 1.0102251586914062, 1.0103941040039062, 1.0105692138671876, 1.0099476318359375, 1.010234375, 1.0106050415039063, 1.0103367919921875, 1.0100828247070313, 1.0105968627929687, 1.0105446166992187, 1.0105169677734376, 1.0106787719726562, 1.0105886840820313, 1.0103572387695312, 1.0107811889648437, 1.010609130859375, 1.010071533203125, 1.0102784423828124, 1.0099865112304687, 1.010282470703125, 1.0107012939453126, 1.0107606811523437, 1.0109655151367187, 1.0103797607421876, 1.0103644409179688, 1.0103593139648437, 1.0102118530273438, 1.0102118530273438, 1.0101124877929688, 1.0109286499023438, 1.0103910522460937, 1.0106552124023438, 1.011262451171875, 1.0107944946289062, 1.0104873046875, 1.0106204223632813, 1.0099425048828126, 1.0105272216796874, 1.00997119140625, 1.0104494018554688, 1.0101258544921874, 1.010229248046875, 1.0103654174804688, 1.0102947998046874, 1.0099885864257812, 1.0105845947265626, 1.0102702026367187, 1.0102896728515625, 1.0102702026367187, 1.0103797607421876, 1.0103336791992188, 1.0109767456054688, 2.1042431640625, 1.0108385009765626, 1.0106071166992188, 1.0101176147460937, 1.0099619750976563, 1.01039306640625, 1.0101083984375, 1.0104063720703125, 1.0112655639648438, 1.0104002685546876, 1.0102466430664063, 1.0105128784179687, 1.0102671508789063, 1.0101801147460938, 1.0101739501953124, 1.0103828735351563, 1.0103910522460937, 1.0109398803710938, 1.0110330810546875, 1.0103602905273437, 1.0109020385742187, 1.0112982788085938, 1.010745361328125, 1.0104309692382814, 1.0102783813476564, 1.010271240234375, 1.0101309204101563, 1.0107698974609376, 1.0114027709960938, 1.01064501953125, 1.0102230834960937, 1.0105385131835938, 1.0106644287109374, 1.0104248046875, 1.0108897094726563, 1.010545654296875, 1.0105814819335937, 1.0107955322265625, 1.0110156860351562, 1.0106214599609376, 1.01065625, 1.01066650390625, 1.0106920776367188, 1.0103101196289062, 1.0108272705078125, 1.0104053955078125, 1.0105886840820313, 1.010440185546875, 1.0111314086914063, 1.010808837890625, 1.0105968627929687, 1.010450439453125, 1.0106644287109374, 1.010177001953125, 1.0106286010742187, 1.01064599609375, 1.01049853515625, 1.0107146606445312, 1.0105742797851562, 1.0107597045898438, 1.0104780883789062, 1.01089794921875, 1.0108590087890625, 2.10035302734375, 1.0105681762695313, 1.0103634033203126, 1.0108939819335938, 1.0105936279296874, 1.01033984375, 1.0102159423828125, 1.0105128784179687, 1.0100869140625, 1.0100756225585938, 1.0101299438476563, 1.0103162841796876, 1.0100142211914063, 1.010387939453125, 1.0102691650390625, 1.0100736083984374, 1.0103839111328126, 1.0102691650390625, 1.0101923828125, 1.0100633544921875, 1.0100695190429687, 1.0103623657226561, 1.0100408325195311, 1.0103367919921875, 1.0102200317382812, 1.0101944580078126, 1.0105272216796874, 1.0104760131835937, 1.010466796875, 1.0102435913085936, 1.010566162109375, 1.010461669921875, 1.0103326416015626, 1.0106859741210938, 1.0104954833984374, 1.0103705444335938, 1.0109921264648438, 1.0105057373046875, 1.0102046508789062, 1.01007666015625, 1.0101852416992188, 
1.0097950439453125, 1.0098770141601563, 1.0107269287109375, 1.0103705444335938, 1.0104033203125, 1.0112020263671875, 1.0103848876953125, 1.0109419555664063, 1.0105897216796875, 1.0109766845703125, 1.010745361328125, 1.0108344116210937, 1.011294189453125, 1.0108703002929686, 1.0104647827148439, 1.0111211547851562, 1.010250732421875, 1.0104473876953124, 1.0103490600585938, 1.0106961669921875, 1.010408447265625, 1.0106736450195313, 2.09991064453125, 1.0102855834960938, 1.010808837890625, 1.0101883544921875, 1.0098963623046875, 1.0099046630859374, 1.0102015380859375, 1.0112860107421875, 1.0114365234375, 1.0114171142578126, 1.011398681640625, 1.0108375854492186, 1.011353515625, 1.0101831665039063, 1.0099578857421876, 1.0098401489257813, 1.0104351806640626, 1.0098911743164063, 1.01047705078125, 1.0109163818359375, 1.0101514282226562, 1.0102528076171875, 1.0105303344726562, 1.010460693359375, 1.0104515380859376, 1.0105640258789061, 1.0108231811523438, 1.0104279174804687, 1.0102528076171875, 1.0102149047851563, 1.0106552124023438, 1.0108037109375, 1.0104227905273437, 1.010044921875, 1.0103521118164063, 1.0104022827148438, 1.0106019897460938, 1.0105252075195312, 1.0107361450195314, 1.0110341186523437, 1.0109389038085939, 1.0102589721679687, 1.0107658081054687, 1.01032958984375, 1.0109112548828125, 1.0104422607421875, 1.0104935302734375, 1.0104278564453124, 1.0109645385742188, 1.0107493896484374, 1.01064501953125, 1.0110023803710937, 1.0106593017578125, 1.0102763671875, 1.01026611328125, 1.0103255004882812, 1.0103726196289062, 1.010171875, 1.0105169677734376, 1.0103091430664062, 1.0100910034179686, 1.0103613891601562, 1.010420654296875, 2.10003857421875, 1.0099517211914062, 1.010044921875, 1.0100490112304688, 1.010212890625, 1.0100838623046875, 1.0100510864257812, 1.0103336791992188, 1.0101473388671875, 1.0099937133789063, 1.0099507446289062, 1.0100213623046874, 1.0101913452148437, 1.010255859375, 1.0110208129882812, 1.0101217041015624, 1.0101422119140624, 1.0106214599609376, 1.0106583251953125, 1.0105067749023438, 1.0107003173828124, 1.0106572875976563, 1.0101596069335939, 1.0103726196289062, 1.0107791137695312, 1.0102958374023439, 1.0105692138671876, 1.010567138671875, 1.0101862182617187, 1.0100357055664062, 1.0101422119140624, 1.0100562133789062, 1.0101636962890626, 1.0102886352539062, 1.0103009033203125, 1.0100858764648437, 1.0102108154296876, 1.0106234741210938, 1.0103255004882812, 1.0102271728515626, 1.0103224487304687, 1.0102650756835938, 1.0104852294921876, 1.0101156005859375, 1.0108948364257813, 1.0105333862304688, 1.0106009521484376, 1.010366455078125, 1.0103654174804688, 1.0105374755859375, 1.0105303344726562, 1.010418701171875, 1.0103674926757813, 1.0106214599609376, 1.0106808471679687, 1.0101104736328126, 1.0104002685546876, 1.010713623046875, 1.010361328125, 1.0107811889648437, 1.0112348022460937, 1.0109830322265625, 1.010874267578125, 2.099938232421875, 1.0099097900390626, 1.010356201171875, 1.0107750244140625, 1.0106531982421876, 1.0112010498046875, 1.0109235229492188, 1.0112593994140624, 1.011103759765625, 1.0114088745117187, 1.0117509155273436, 1.0115809326171874, 1.0110750732421876, 1.0116761474609375, 1.0117703857421876, 1.01070849609375, 1.0111211547851562, 1.0108140869140625, 1.010822021484375, 1.0101463012695313, 1.010524169921875, 1.010092041015625, 1.0108528442382811, 1.0102384643554687, 1.0105374755859375, 1.0103961791992186, 1.0102907104492187, 1.0105763549804687, 1.0101422119140624, 1.0106972045898437, 1.010524169921875, 1.0101381225585937, 1.01081396484375, 
1.0101923828125, 1.010398193359375, 1.0102046508789062, 1.0100275268554688, 1.0102159423828125, 1.01076171875, 1.0107730102539063, 1.0104595947265624, 1.0105743408203125, 1.0107811889648437, 1.0105466918945312, 1.010503662109375, 1.0107811889648437, 1.0115594482421875, 1.0113648681640626, 1.0109235229492188, 1.0113966064453126, 1.0115973510742187, 1.0105558471679688, 1.0106337280273439, 1.0101053466796874, 1.0102333374023438, 1.0100643920898438, 1.0104063720703125, 1.0102118530273438, 1.0105886840820313, 1.0109163818359375, 1.01098291015625, 1.0103121948242189, 1.0104627075195312, 2.1005556640625, 1.0100828247070313, 1.0103214111328125, 1.0101217041015624, 1.0104524536132813, 1.0106480712890624, 1.009934326171875, 1.0109644775390625, 1.01163427734375, 1.011917724609375, 1.0118707275390626, 1.0117642211914062, 1.0120970458984375, 1.0110791625976563, 1.0113136596679688, 1.0106326904296874, 1.010629638671875, 1.0110320434570312, 1.0109531860351562, 1.0102200317382812, 1.0101647338867188, 1.01035107421875, 1.0101319580078125, 1.0104688720703126, 1.0105006103515626, 1.01138330078125, 1.010892822265625, 1.0108292846679687, 1.0109214477539064, 1.0098134765625, 1.0101145629882813, 1.0102262573242187, 1.0102721557617187, 1.010165771484375, 1.0108426513671875, 1.0108047485351563, 1.010640869140625, 1.0113280029296876, 1.0108528442382811, 1.010176025390625, 1.0101810913085938, 1.0100869140625, 1.0103460083007811, 1.010440185546875, 1.0107904052734376, 1.0101801147460938, 1.0104586181640625, 1.0103050537109375, 1.0111918334960937, 1.0107914428710938, 1.0108416137695313, 1.0108283081054688, 1.0106810302734375, 1.0106591186523437, 1.0115072021484375, 1.0104279174804687, 1.0108436279296875, 1.0108283081054688, 1.01055078125, 1.0102097778320311, 1.010460693359375, 1.0103121948242189, 1.0103951416015624, 2.102578125, 1.0104268798828124, 1.0102036743164062, 1.010355224609375, 1.0103173217773438, 1.0101637573242188, 1.01056201171875, 1.0103060302734375, 1.0101749877929687, 1.0104330444335938, 1.0104094848632812, 1.0101801147460938, 1.0102282104492188, 1.0102640380859376, 1.0102354125976563, 1.01035107421875, 1.0101268310546876, 1.01020263671875, 1.0103245849609375, 1.010389892578125, 1.0105927734375, 1.0102783813476564, 1.0102159423828125, 1.0109481201171875, 1.0104248046875, 1.0102097778320311, 1.0102999267578125, 1.0106634521484374, 1.01051904296875, 1.0106911010742188, 1.0106603393554687, 1.0103428955078124, 1.010208740234375, 1.0103828735351563, 1.0103501586914063, 1.0101267700195313, 1.010428955078125, 1.0099415283203126, 1.0103070678710937, 1.010746337890625, 1.0105733032226563, 1.0100193481445312, 1.0105620727539062, 1.0108969116210937, 1.010830322265625, 1.010567138671875, 1.0105261840820312, 1.0104903564453125, 1.010502685546875, 1.010763916015625, 1.0106510009765626, 1.01055078125, 1.010597900390625, 1.010534423828125, 1.01037158203125, 1.0100613403320313, 1.0104227905273437, 1.0101196899414062, 1.0103214111328125, 1.0106286010742187, 1.0109337768554687, 1.0104739990234375, 1.0107742309570313, 2.104293212890625, 1.010323486328125, 1.0104320068359376, 1.0102138671875, 1.0099435424804688, 1.0105354614257813, 1.0101022338867187, 1.0104801025390624, 1.0105220947265625, 1.0102742919921874, 1.0100582275390626, 1.0102661743164063, 1.0102158813476563, 1.0104473876953124, 1.0102271728515626, 1.0102630615234376, 1.0100213623046874, 1.0100828247070313, 1.0104155883789063, 1.01005517578125, 1.010208740234375, 1.0101239013671874, 1.0102445068359376, 1.0100910034179686, 1.0104801025390624, 1.0102210693359375, 
1.0102210693359375, 1.0105426025390625, 1.0104248046875, 1.0105814819335937, 1.0103746337890624, 1.0100991821289063, 1.0102630615234376, 1.0103203735351562, 1.0105231323242188, 1.0105108642578124, 1.0102191162109375, 1.0101605224609376, 1.0104053955078125, 1.0104586181640625, 1.0102518920898438, 1.0101390380859374, 1.0103285522460939, 1.0101493530273438, 1.0104166259765626, 1.0106941528320312, 1.0107689208984374, 1.0102313232421876, 1.0105620727539062, 1.01051708984375, 1.0104871826171875, 1.01003369140625, 1.0101371459960937, 1.0099342651367187, 1.0106521606445313, 1.0104391479492187, 1.0105620727539062, 1.010513916015625, 1.010798583984375, 1.0104074096679687, 1.0103634033203126, 1.0103224487304687, 1.010681884765625, 2.103435302734375, 1.010229248046875, 1.0106736450195313, 1.0107843017578124, 1.0107432861328125, 1.0110986328125, 1.0106265869140625, 1.010587646484375, 1.0101248168945312, 1.0100542602539062, 1.0107769775390625, 1.0108426513671875, 1.0104627075195312, 1.0106603393554687, 1.0108734741210939, 1.01037255859375, 1.0107083740234375, 1.0104166259765626, 1.0103275756835937, 1.0101319580078125, 1.0107750244140625, 1.0107258911132813, 1.0107606811523437, 1.0102907104492187, 1.0104279174804687, 1.0105446166992187, 1.0105364379882813, 1.0103009643554688, 1.010494384765625, 1.0100807495117188, 1.0106644287109374, 1.0108026733398439, 1.0108375244140626, 1.010492431640625, 1.0103746337890624, 1.0101227416992187, 1.0101484375, 1.0099373168945311, 1.0103756713867187, 1.0104944458007812, 1.0109050903320314, 1.0101473388671875, 1.0104903564453125, 1.0099251098632813, 1.0105374755859375, 1.010134033203125, 1.0101810913085938, 1.0101248168945312, 1.0104063720703125, 1.0102702026367187, 1.0105569458007813, 1.0107965698242187, 1.01083544921875, 1.0107893676757813, 1.01082421875, 1.0101309204101563, 1.0103572387695312, 1.010207763671875, 1.010361328125, 1.0107053833007813, 1.0118082275390625, 1.01166796875, 1.01172021484375]",tokens/s,0.9745506485828798,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,mistralai/Mistral-7B-v0.1,mistralai/Mistral-7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2055.856128,6201.802752,0.0,5555.355648,5273.986048,s,10,6.166099914550782,0.6166099914550782,0.001805187295356428,0.6164970397949219,0.6192070678710938,0.619502490234375,0.619738828125,"[0.6174358520507812, 0.6152646484375, 0.6157444458007812, 0.6149035034179687, 0.615364013671875, 0.6138878173828125, 0.6173106689453125, 0.6197979125976563, 0.6172496337890625, 0.6191414184570313]",tokens/s,415.1732919472978,kWh,7.25516759687e-06,3.97553441218727e-06,3.4973328632232774e-05,4.6204030641290036e-05,tokens/kWh,5540642.157120091,MB,2055.856128,6201.802752,0.0,5555.355648,5324.908032,s,10,365.7995859375,36.57995859374999,0.035939708288129694,36.562037109375,36.625498828125,36.6277962890625,36.6296342578125,"[36.623765625, 36.5703828125, 36.55220703125, 36.541484375, 36.54166015625, 36.54937890625, 36.62498828125, 36.55369140625, 36.61193359375, 
36.63009375]",tokens/s,1.7222545465309549,kWh,0.0004313527007732127,0.00023641881037992466,0.002030578407141164,0.002698349918294301,tokens/kWh,23347.602018875292,,s,629,370.76827941894544,0.5894567240364791,0.07322585165475656,0.5803335571289062,0.5820514404296875,0.5823793212890626,1.195406015625,"[0.5803048706054688, 0.5797140502929687, 0.5794703369140625, 0.5797222290039062, 0.57939453125, 0.5800867919921875, 0.5797529296875, 0.5800857543945312, 0.5802291259765625, 0.5800048828125, 0.5795665893554688, 0.5795399780273438, 0.580042724609375, 0.5822300415039062, 0.5817169799804688, 0.5822146606445312, 0.5819473876953125, 0.5820886840820313, 0.5811210327148437, 0.582023193359375, 0.5814353637695312, 0.581949462890625, 0.580401123046875, 0.5816944580078125, 0.5808414916992187, 0.5820006103515625, 0.5810022583007812, 0.5814302978515625, 0.581444580078125, 0.5812797241210937, 0.5823364868164063, 0.5824470825195313, 0.5824716796875, 0.5824655151367187, 0.581918701171875, 0.5819730224609375, 0.58178662109375, 0.5807022094726563, 0.5815429077148437, 0.5823539428710938, 0.5825064697265625, 0.5821757202148438, 0.5818603515625, 0.5820498046875, 0.582350830078125, 0.5819617919921874, 0.5816124877929687, 0.5811609497070312, 0.5808312377929687, 0.582739990234375, 0.5819484252929688, 0.5809541015625, 0.5825567016601563, 0.5805885620117187, 0.5814404907226562, 0.581485595703125, 0.581022705078125, 0.5828966674804688, 0.5815776977539062, 0.5818429565429688, 0.5819647827148438, 0.5811875610351562, 1.1957647705078125, 0.5814425659179687, 0.5816586303710938, 0.5816115112304687, 0.5822146606445312, 0.5820579833984375, 0.5818347778320313, 0.5802680053710938, 0.5797498779296875, 0.5799751586914063, 0.5797396240234375, 0.5802352905273438, 0.5802066040039062, 0.580453369140625, 0.5803786010742188, 0.5806100463867188, 0.5803131103515625, 0.5797007446289062, 0.5798512573242187, 0.5799127197265626, 0.5799597778320312, 0.5807144775390625, 0.5798113403320313, 0.579999755859375, 0.5794037475585937, 0.5798748168945312, 0.5804503173828125, 0.5799270629882812, 0.5803120727539063, 0.5800960083007812, 0.5797396240234375, 0.5799884643554688, 0.5807646484375, 0.5804298095703125, 0.5802833862304687, 0.5804861450195312, 0.5813104858398438, 0.5805506591796875, 0.581159912109375, 0.5806080932617188, 0.5819862670898438, 0.5803499755859375, 0.5799393310546875, 0.5800376586914062, 0.5834240112304687, 0.5802772216796875, 0.5810247802734375, 0.5808568115234375, 0.58094287109375, 0.5806510009765625, 0.580552734375, 0.580220947265625, 0.5798553466796875, 0.5802045288085937, 0.5803714599609375, 0.5809602661132812, 0.5803591918945312, 0.5802659912109375, 0.5799219360351563, 0.5800376586914062, 0.57982568359375, 0.5798696899414062, 0.579905517578125, 1.19632080078125, 0.5800499267578125, 0.5804564208984375, 0.5798563842773438, 0.5799393310546875, 0.57995263671875, 0.5803939819335937, 0.5799925537109375, 0.5806735229492187, 0.5805301513671876, 0.5799966430664063, 0.5798082275390625, 0.5798707275390625, 0.5796608276367188, 0.579547119140625, 0.5798369140625, 0.5801513061523438, 0.57986767578125, 0.5802199096679688, 0.5796167602539063, 0.5794580688476563, 0.5797939453125, 0.5794334716796875, 0.5804932861328125, 0.5801594848632813, 0.5801451416015625, 0.5808414916992187, 0.5797929077148437, 0.5807810668945312, 0.5800017700195312, 0.5802393798828125, 0.5800509643554688, 0.5801564331054687, 0.5809776611328125, 0.579979248046875, 0.580262939453125, 0.5801871337890625, 0.58008984375, 0.58040625, 0.5803786010742188, 0.5804656372070313, 
0.5801768798828125, 0.5805813598632813, 0.5812029418945313, 0.5800724487304687, 0.5806858520507813, 0.5798379516601563, 0.580527099609375, 0.5803837280273437, 0.5800488891601563, 0.5802803344726563, 0.5804451904296875, 0.5802803344726563, 0.58022607421875, 0.580389892578125, 0.5802311401367187, 0.580168701171875, 0.5799096069335937, 0.580010986328125, 0.5800120239257812, 0.5806653442382812, 0.5806591796875, 0.5803489379882812, 1.1951103515625, 0.5797929077148437, 0.579947509765625, 0.579768310546875, 0.5811712036132812, 0.5812357177734375, 0.58012060546875, 0.5796597900390625, 0.5797007446289062, 0.5796484985351562, 0.5794017333984375, 0.580021240234375, 0.5799884643554688, 0.5799659423828125, 0.5805444946289062, 0.5804830932617188, 0.5801144409179687, 0.5799618530273437, 0.5795880737304687, 0.579757080078125, 0.579483642578125, 0.5798082275390625, 0.5797939453125, 0.5796690063476563, 0.5802239990234375, 0.57957275390625, 0.5796597900390625, 0.5793659057617188, 0.5799301147460938, 0.579904541015625, 0.5796874389648438, 0.580485107421875, 0.5801973876953125, 0.5806879272460937, 0.5800631713867187, 0.579857421875, 0.5803335571289062, 0.5799208984375, 0.5803970336914063, 0.5798604736328125, 0.5801195678710938, 0.5799127197265626, 0.5803151245117187, 0.5800714111328125, 0.5800929565429688, 0.5801134033203125, 0.579852294921875, 0.580485107421875, 0.5806182250976563, 0.5799198608398437, 0.5808701171875, 0.5802598266601563, 0.5800806274414062, 0.5799147338867188, 0.5800150756835938, 0.5798461303710938, 0.5798594360351562, 0.5798338623046875, 0.5796372680664063, 0.5799874267578125, 0.5799168090820312, 0.57957373046875, 0.580232177734375, 1.19552099609375, 0.5814528198242187, 0.5798369140625, 0.5804021606445312, 0.579589111328125, 0.5795419921875, 0.5797027587890625, 0.5801830444335937, 0.5802567749023437, 0.5800621948242187, 0.57980517578125, 0.5795338134765625, 0.5802721557617188, 0.5796618041992188, 0.5800499267578125, 0.57969970703125, 0.5798021240234374, 0.58012158203125, 0.581080078125, 0.5802280883789063, 0.5805322265625, 0.5801748657226562, 0.57978369140625, 0.5796332397460937, 0.57942626953125, 0.5793402709960938, 0.5803540649414063, 0.5799547119140624, 0.579684326171875, 0.5799802856445313, 0.5798615112304687, 0.5798123779296875, 0.579684326171875, 0.5798983764648438, 0.5798276977539063, 0.579751953125, 0.58046875, 0.5798799438476563, 0.5800233154296875, 0.5798615112304687, 0.5794273071289062, 0.5798963012695313, 0.5797560424804687, 0.5802567749023437, 0.5799588012695313, 0.5798819580078125, 0.5800070190429687, 0.5797190551757813, 0.5804554443359375, 0.5799905395507813, 0.58028955078125, 0.5803919067382812, 0.5802905883789062, 0.5805660400390625, 0.579968994140625, 0.5803079833984375, 0.5797980346679688, 0.580832275390625, 0.580274169921875, 0.5802926025390625, 0.58039501953125, 0.57959423828125, 0.5799966430664063, 1.19406689453125, 0.579810302734375, 0.5797396240234375, 0.5796720581054687, 0.5796259765625, 0.5796741333007812, 0.5809397583007813, 0.5795031127929687, 0.5802731323242187, 0.5795594482421875, 0.5797140502929687, 0.579177490234375, 0.5797734375, 0.5801287841796875, 0.5797498779296875, 0.5798656005859375, 0.5796587524414063, 0.5798328247070312, 0.5797969970703125, 0.5797437744140626, 0.5796495361328124, 0.5797191772460938, 0.5803560791015625, 0.5800233154296875, 0.5815418701171875, 0.5802926025390625, 0.5799188232421875, 0.5799495849609375, 0.5795277099609375, 0.5796403198242187, 0.5796351928710938, 0.5801963500976562, 0.5797037963867188, 0.5795000610351563, 
0.5798338623046875, 0.5798881225585938, 0.5797590942382812, 0.5800130615234375, 0.58073193359375, 0.5800908813476563, 0.5806469116210937, 0.5811783447265625, 0.5802403564453125, 0.5803519897460937, 0.58052197265625, 0.5798973388671875, 0.5800233154296875, 0.5798533325195312, 0.5805916137695313, 0.5798184814453125, 0.5794979858398438, 0.579715087890625, 0.5797007446289062, 0.5794171142578125, 0.5801093139648438, 0.58170166015625, 0.5814415283203125, 0.5815838623046875, 0.5820006103515625, 0.57984716796875, 0.5823866577148438, 0.5812367553710938, 0.5812715454101562, 1.19872412109375, 0.5820743408203125, 0.5816565551757813, 0.582371337890625, 0.5821552734375, 0.5816156005859375, 0.5810933837890625, 0.5808988037109375, 0.5822463989257812, 0.5805066528320313, 0.5815418701171875, 0.5819443359375, 0.5820221557617188, 0.5824573364257812, 0.58057421875, 0.5815746459960938, 0.5819218139648438, 0.581970947265625, 0.5814149169921875, 0.58176513671875, 0.5813401489257812, 0.5796444091796875, 0.5797304077148437, 0.57986767578125, 0.57969970703125, 0.5803519897460937, 0.581917724609375, 0.5818121948242188, 0.5813934326171875, 0.5819647827148438, 0.5817763671875, 0.5821880493164062, 0.5812572021484375, 0.5817692260742188, 0.58166064453125, 0.5815797729492187, 0.5820784912109375, 0.5817958374023438, 0.5818296508789063, 0.581411865234375, 0.58090087890625, 0.5812305908203125, 0.5817753295898438, 0.581781494140625, 0.5817088012695313, 0.5801287841796875, 0.5828178100585938, 0.5812131958007812, 0.5801339111328125, 0.5819945068359375, 0.5805414428710938, 0.5801564331054687, 0.58094384765625, 0.5817108764648438, 0.5819852905273437, 0.5817589721679688, 0.5808824462890625, 0.580969482421875, 0.5814876098632813, 0.5799014282226562, 0.5799772338867187, 0.5808035888671875, 0.5819320068359375, 1.200752685546875, 0.5804697875976562, 0.5798113403320313, 0.5795338134765625, 0.57959423828125, 0.5793955688476562, 0.5800294189453125, 0.579673095703125, 0.5795379028320312, 0.5792747802734375, 0.57942529296875, 0.5792655639648437, 0.579324951171875, 0.5791590576171874, 0.579515380859375, 0.5797427368164062, 0.5802495727539062, 0.5797744750976562, 0.58040625, 0.5798430786132812, 0.5793853149414062, 0.5796864013671875, 0.5795184936523438, 0.5801082763671875, 0.5802659912109375, 0.5800826416015625, 0.5807779541015625, 0.5820303344726563, 0.5817589721679688, 0.5796484985351562, 0.5796925659179688, 0.580094970703125, 0.5802587890625, 0.5804124145507813, 0.5822156982421876, 0.5799669799804688, 0.5799188232421875, 0.5803817138671875, 0.5797427368164062, 0.5796126708984375, 0.5807124633789063, 0.5810831298828125, 0.5797539672851563, 0.579541015625, 0.5795164184570313, 0.5796055297851562, 0.5795983276367187, 0.5792973022460938, 0.5793689575195312, 0.5794713745117187, 0.5795768432617188, 0.5799382934570313, 0.5819227905273437, 0.5818388671875, 0.581307373046875, 0.5798348999023437, 0.5799434204101562, 0.5802772216796875, 0.5826939086914062, 0.5821061401367188, 0.582097900390625, 0.5818736572265625, 0.5818245239257812, 1.197048828125, 0.5797693481445313, 0.5795419921875, 0.579435546875, 0.5806448364257812, 0.580769775390625, 0.5797642211914062, 0.5793648681640625, 0.5799239501953125, 0.579820556640625, 0.5798553466796875, 0.5799976806640625, 0.5800867919921875, 0.5793955688476562, 0.579800048828125, 0.5796505737304688, 0.5797386474609375, 0.5797744750976562, 0.5809653930664063, 0.58010009765625, 0.5795747680664063, 0.5806735229492187, 0.5816740112304688, 0.5815050048828125, 0.5816995849609375, 0.581855224609375, 0.5821460571289062, 
0.5816473388671874, 0.5822300415039062, 0.5816422119140625, 0.5811865844726563, 0.58155517578125, 0.5818736572265625, 0.5809817504882813, 0.5813842163085937, 0.5811926879882813, 0.5820927734375, 0.5812623291015625, 0.5827706909179687, 0.582930419921875, 0.5829867553710938, 0.58191259765625, 0.5821531982421875, 0.5811548461914062, 0.5818746948242187, 0.5817907104492187, 0.5817804565429687, 0.5819412231445312, 0.5816668090820313, 0.5811885986328125, 0.5814149169921875, 0.5814466552734375, 0.5809336547851562, 0.581085205078125, 0.5813053588867187, 0.581718017578125, 0.5829119873046875, 0.582118408203125, 0.5821614379882812, 0.5819883422851563, 0.5823518676757813, 0.5817088012695313, 0.5822545776367187, 1.201244140625, 0.580126708984375, 0.5799127197265626, 0.5811712036132812, 0.5819514770507812, 0.5811456298828125, 0.5812008666992188, 0.5814640502929688, 0.5819463500976563, 0.5820886840820313, 0.5823846435546876, 0.580305908203125, 0.5809714965820313, 0.5801236572265625, 0.5817006225585938, 0.5822177124023438, 0.5816535034179687, 0.5801830444335937, 0.58051171875, 0.5805444946289062, 0.5798604736328125, 0.5808660278320312, 0.5804462280273438, 0.5811814575195312, 0.5804677124023437, 0.581433349609375, 0.5835038452148438, 0.5813534545898438, 0.5819402465820313, 0.5816535034179687, 0.58176513671875, 0.5820324096679688, 0.5815357666015625, 0.5827686157226563, 0.582540283203125, 0.5820057373046875, 0.5823068237304687, 0.58144970703125, 0.5820303344726563, 0.5807462158203125, 0.5822003173828125, 0.5823150024414062, 0.5814906616210938, 0.5829560546875, 0.5828761596679688, 0.5815275268554687, 0.5806663818359376, 0.5816873168945312, 0.5818255615234375, 0.58170166015625, 0.5825853271484375, 0.5802014770507813, 0.5799505615234375, 0.5799178466796875, 0.581676025390625, 0.5818982543945312, 0.5817825317382812, 0.5805660400390625, 0.581728271484375, 0.581496826171875, 0.5820068359375, 0.5809755859375, 0.5805465087890626]",tokens/s,1.6964773820072907,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-16B-nl,Salesforce/codegen-16B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File 
""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: CodeGenForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-2.7b,EleutherAI/pythia-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File 
""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,a,a,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/a/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 
1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a309b-1493a7dd3df908132acb04ed;fb23e3ab-5c77-4af2-92e8-e1dedd466e34) Repository Not Found for url: https://huggingface.co/a/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: a is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-2.7B,EleutherAI/gpt-neo-2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-40b,tiiuae/falcon-40b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: FalconForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,1,1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/1/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e 
huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a34ae-26206a7a025e0685503e4e7a;f8a28d16-8c5b-4c4d-8305-cc5eea4a03aa) Repository Not Found for url: https://huggingface.co/1/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 1 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-base-alpha-3b,stabilityai/stablelm-base-alpha-3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in 
target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,t,t,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/t/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file 
resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2fdb-1642bb3e4e3981e70d42c895;8c169a42-3d13-4f78-bb30-35baee4af177) Repository Not Found for url: https://huggingface.co/t/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: t is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neox-20b,EleutherAI/gpt-neox-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() 
File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,/,/,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in 
get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 370, in cached_file raise EnvironmentError( OSError: / does not appear to have a file named config.json. Checkout 'https://huggingface.co///tree/None' for available files. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-2b,google/recurrentgemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc39f-10e55af803985b57041b23f9;dea7a8c7-cc1d-4a71-b271-13ca03889124) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. 
Cannot access content at: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/recurrentgemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-72B,Qwen/Qwen1.5-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,-,-,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 106, in _inner_fn validate_repo_id(arg_value) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 160, in validate_repo_id raise HFValidationError( huggingface_hub.errors.HFValidationError: Repo id must use alphanumeric chars or '-', '_', '.', '--' and '..' are forbidden, '-' and '.' cannot start or end the name, max length is 96: '-'. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 463, in cached_file raise EnvironmentError( OSError: Incorrect path_or_model_id: '-'. Please provide either the path to a local folder or the repo_id of a model on the Hub. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-6B-nl,Salesforce/codegen-6B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: CodeGenForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-7b,google/recurrentgemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in 
hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667cc3f5-7a9622717fe7cbd55e5b074e;7d61eb0b-4252-4139-bbc9-28b61d3599b5) Repository Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: google/recurrentgemma-7b is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-13b,huggyllama/llama-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2981.855232,9259.450368,0.0,8613.003264,8211.364864,s,10,10.951241455078126,1.0951241455078125,0.002069112877122291,1.0949886474609376,1.0979957763671875,1.0981948486328124,1.0983541064453124,"[1.0979515380859375, 1.0983939208984375, 1.0932901611328125, 1.0924078369140624, 1.0937889404296874, 1.092544677734375, 1.0941727294921875, 1.0958045654296875, 1.096221435546875, 
1.0966656494140625]",tokens/s,233.76345143161097,kWh,1.2900002946456276e-05,7.068370924207557e-06,5.9710658879597037e-05,7.967903275026086e-05,tokens/kWh,3212890.4074724964,MB,2986.262528,9330.753536,0.0,8684.306432,8503.627264,s,10,640.8562343749999,64.0856234375,0.007507663361876726,64.08716796875,64.093946875,64.09397148437499,64.09399117187499,"[64.08627734375, 64.0873125, 64.09369921875, 64.09394140625, 64.0870234375, 64.09399609375, 64.08822265625, 64.078796875, 64.0717890625, 64.07517578125]",tokens/s,0.9830597975135754,kWh,0.000756652800159322,0.0004147102533278666,0.0034761336142379974,0.004647496667725186,tokens/kWh,13555.684813613145,,s,629,649.691616149902,1.0328960511127223,0.130104123840355,1.0171883544921876,1.0177021728515625,1.01789716796875,2.1117173046874997,"[1.0174935302734376, 1.0171514892578124, 1.0171883544921876, 1.017997314453125, 1.0175242309570312, 1.0175477905273438, 1.0175784912109376, 1.017881591796875, 1.0172057495117188, 1.0168115234375, 1.0167367553710938, 1.0173132934570313, 1.0168719482421875, 1.0171760864257813, 1.0166824951171876, 1.0168237915039062, 1.0169927978515625, 1.0175252685546874, 1.0170921020507813, 1.0169978637695312, 1.0169354248046876, 1.0170029907226563, 1.0173900756835939, 1.017280517578125, 1.016859619140625, 1.0172426147460938, 1.016791015625, 1.0168565673828125, 1.0172825317382812, 1.01675927734375, 1.0171392211914063, 1.0170572509765625, 1.0173419799804688, 1.0176777954101563, 1.0177576904296874, 1.0170183715820313, 1.0177105712890624, 1.0174996337890625, 1.0173941650390625, 1.0173890380859374, 1.017080810546875, 1.0169651489257812, 1.0173880615234374, 1.0172467041015625, 1.0171791381835937, 1.016754150390625, 1.0172498168945312, 1.0174290161132813, 1.0169159545898439, 1.0170194091796876, 1.016964111328125, 1.0168411865234375, 1.0170531616210938, 1.0171678466796874, 1.0177003784179688, 1.0177116088867189, 1.0175713500976562, 1.0176112670898438, 1.01741259765625, 1.01737060546875, 1.0172507934570312, 1.0171494140625, 2.116391845703125, 1.01690673828125, 1.0169548950195313, 1.0181068725585938, 1.0178170776367188, 1.0172692260742187, 1.0174985961914063, 1.017291748046875, 1.0175293579101563, 1.0175150146484375, 1.016748046875, 1.0170767211914062, 1.0172333984375, 1.0175641479492188, 1.0175477905273438, 1.0169292602539062, 1.016869873046875, 1.0169712524414063, 1.0170634155273437, 1.0173245239257813, 1.0168094482421874, 1.016943603515625, 1.0171064453125, 1.0171781005859375, 1.017275390625, 1.0166548461914062, 1.017407470703125, 1.0169978637695312, 1.016574951171875, 1.0173306884765625, 1.0168370971679688, 1.016875, 1.0170009765625, 1.0174505004882812, 1.0167992553710938, 1.0171494140625, 1.0171607055664063, 1.0168944702148437, 1.0170101928710937, 1.017059326171875, 1.017080810546875, 1.0172037353515626, 1.0174771118164063, 1.0174095458984376, 1.0170245361328125, 1.019852783203125, 1.0168125610351562, 1.0170029907226563, 1.0175538940429687, 1.0178017578125, 1.0176777954101563, 1.016995849609375, 1.0182564086914063, 1.0176224975585937, 1.0173235473632813, 1.01770751953125, 1.0170552368164063, 1.01711669921875, 1.0171351318359374, 1.0170449829101562, 1.016859619140625, 1.0170890502929688, 1.0175088500976563, 2.11154345703125, 1.0170613403320312, 1.0173931274414063, 1.0170582885742188, 1.01701123046875, 1.01686474609375, 1.0172272338867188, 1.0170921020507813, 1.0172078247070313, 1.0167490844726563, 1.0166835327148438, 1.0169088134765625, 1.0174351196289062, 1.0175641479492188, 1.017354248046875, 1.0168699340820313, 1.0168974609375, 
1.0171945190429688, 1.0176737670898437, 1.0177105102539064, 1.017945068359375, 1.0175324096679688, 1.0171945190429688, 1.0176880493164062, 1.0172406005859376, 1.0171146240234374, 1.0182410278320313, 1.0179573974609375, 1.0179983520507812, 1.0176327514648438, 1.0174893798828124, 1.0176010131835938, 1.0182543334960938, 1.0177720336914062, 1.0173716430664062, 1.017248779296875, 1.016859619140625, 1.0170859375, 1.0173767700195313, 1.0172733154296876, 1.0168115234375, 1.0170787963867187, 1.0168923950195312, 1.0172713012695314, 1.0172866821289062, 1.0175477905273438, 1.0167623901367187, 1.0171729736328126, 1.0170921020507813, 1.0174443359375, 1.0174791870117188, 1.017380859375, 1.0176481323242188, 1.0177054443359375, 1.0170101928710937, 1.01709619140625, 1.0171812133789062, 1.0176399536132812, 1.0173388671875, 1.017680908203125, 1.0173245239257813, 1.0175344848632812, 1.016958984375, 2.111784912109375, 1.0173778076171875, 1.0168678588867188, 1.0177362060546875, 1.01760205078125, 1.0175693359375, 1.0170152587890624, 1.0167971801757814, 1.0172047119140626, 1.0175958862304688, 1.016896484375, 1.0167449340820311, 1.017417724609375, 1.0172200927734374, 1.016796142578125, 1.0171340942382812, 1.016943603515625, 1.01682177734375, 1.0168862915039063, 1.0173562622070313, 1.017080810546875, 1.0174678955078125, 1.0173972778320313, 1.0175897827148437, 1.017354248046875, 1.0172293090820312, 1.0175057983398437, 1.0172456665039062, 1.0170859375, 1.0177197875976562, 1.017154541015625, 1.0173880615234374, 1.0175590209960939, 1.01743408203125, 1.016933349609375, 1.0172835693359374, 1.017112548828125, 1.0172262573242188, 1.0170572509765625, 1.017354248046875, 1.01725390625, 1.0173184204101562, 1.018461181640625, 1.0179942626953125, 1.0194544677734374, 1.0179215087890625, 1.0178590698242187, 1.0177402954101562, 1.017133056640625, 1.0170347290039063, 1.0172620849609375, 1.0173992919921875, 1.0176665649414063, 1.01707470703125, 1.0167613525390624, 1.0173092041015626, 1.0173604125976563, 1.0174525146484374, 1.017565185546875, 1.0170890502929688, 1.01747509765625, 1.017607177734375, 1.0175682373046875, 2.112203857421875, 1.017459716796875, 1.0166384887695312, 1.0169661865234374, 1.0174985961914063, 1.0174044189453124, 1.016933349609375, 1.0176819458007813, 1.0176041259765625, 1.0174843139648437, 1.017217041015625, 1.0171586303710938, 1.01719140625, 1.017375732421875, 1.0171525268554686, 1.0169210815429688, 1.017154541015625, 1.0176266479492186, 1.0178191528320313, 1.0170921020507813, 1.0168934326171875, 1.0171627807617187, 1.0176942138671874, 1.017333740234375, 1.0175324096679688, 1.0170572509765625, 1.0169077758789062, 1.017111572265625, 1.0173767700195313, 1.0171566162109376, 1.0170460205078125, 1.0172252197265625, 1.0173870239257812, 1.0172252197265625, 1.0171300048828125, 1.0168084716796875, 1.017064453125, 1.0177720336914062, 1.016975341796875, 1.0169774169921875, 1.0169395141601563, 1.0170787963867187, 1.0171217651367188, 1.0177638549804688, 1.0169712524414063, 1.016958984375, 1.01680126953125, 1.0175764770507814, 1.0173480834960937, 1.0168197021484375, 1.017017333984375, 1.0169978637695312, 1.0169896850585938, 1.0174003295898437, 1.0170214233398438, 1.01707568359375, 1.0175559692382812, 1.01893017578125, 1.0172241821289063, 1.0170316772460937, 1.01722216796875, 1.0172119140625, 1.0173163452148437, 2.1100595703125, 1.0167705688476563, 1.0177894287109375, 1.0171873168945313, 1.0167449340820311, 1.0167142333984376, 1.016875, 1.0168862915039063, 1.0167982177734376, 1.0173235473632813, 1.0169313354492187, 
1.0169343872070313, 1.0175621337890626, 1.017955322265625, 1.0172938232421875, 1.0173009643554687, 1.017554931640625, 1.0171617431640625, 1.0173388671875, 1.0177402954101562, 1.0175774536132813, 1.0175682373046875, 1.0177136840820313, 1.0181427001953125, 1.0177576904296874, 1.0176788330078126, 1.017692138671875, 1.017617431640625, 1.0178262939453124, 1.0180515747070313, 1.0169354248046876, 1.0168514404296876, 1.0176296997070313, 1.0172395629882813, 1.0167859497070313, 1.0172784423828125, 1.0170859375, 1.0170224609375, 1.0186465454101563, 1.0175170288085937, 1.0169405517578125, 1.0179154052734376, 1.0179379272460938, 1.01726318359375, 1.0171544799804688, 1.0171340942382812, 1.0169609985351562, 1.0169047241210938, 1.017776123046875, 1.017396240234375, 1.0173921508789063, 1.0171002807617187, 1.0185277709960938, 1.017439208984375, 1.017312255859375, 1.017007080078125, 1.0172160034179687, 1.0172160034179687, 1.017218017578125, 1.0172958984375, 1.0172764282226563, 1.0170203857421876, 1.017691162109375, 2.111909912109375, 1.0176123046875, 1.0175139770507813, 1.0174228515625, 1.0174617309570313, 1.0175221557617187, 1.0169579467773437, 1.0177576904296874, 1.0171986083984375, 1.0168289184570312, 1.0170685424804689, 1.0173532104492187, 1.0175938720703126, 1.017185302734375, 1.01743408203125, 1.0171443481445313, 1.016764404296875, 1.0172661743164062, 1.0177013549804688, 1.0171986083984375, 1.017049072265625, 1.0170828857421874, 1.0174166870117187, 1.0170664672851562, 1.0174453735351563, 1.0168790893554687, 1.0178897705078125, 1.0175170288085937, 1.0171791381835937, 1.0170726928710938, 1.0169456176757812, 1.0167869262695313, 1.0170419311523438, 1.0173788452148438, 1.017333740234375, 1.0175242309570312, 1.018076171875, 1.01758056640625, 1.0170736694335938, 1.0170521850585938, 1.0168186645507813, 1.0173572998046876, 1.0176532592773437, 1.01709619140625, 1.0169476928710937, 1.0168115844726562, 1.0169046630859375, 1.0173613891601563, 1.0177177734375, 1.0168975219726561, 1.016826904296875, 1.0167296142578126, 1.017049072265625, 1.0175293579101563, 1.0174402465820314, 1.0174054565429687, 1.0173552856445311, 1.017469970703125, 1.0172620849609375, 1.0167603149414062, 1.0176635131835938, 1.0175570068359374, 1.0175396118164062, 2.113271728515625, 1.0164520874023437, 1.0175570068359374, 1.0175221557617187, 1.0169467163085937, 1.01684326171875, 1.01673779296875, 1.0166527709960937, 1.016406005859375, 1.01699072265625, 1.0166343383789063, 1.0166343383789063, 1.016573974609375, 1.0167418823242187, 1.0167675170898438, 1.0167920532226562, 1.0167357177734375, 1.0169302978515624, 1.0171954956054687, 1.0174054565429687, 1.0170368041992188, 1.0171791381835937, 1.0176737060546874, 1.0177310791015626, 1.0174033813476562, 1.0174044189453124, 1.017459716796875, 1.0175098876953126, 1.0173675537109375, 1.0176378784179687, 1.017259033203125, 1.0173460693359375, 1.0176511840820313, 1.0174822387695313, 1.0173767700195313, 1.0175150146484375, 1.0174218139648437, 1.0178508911132813, 1.0178406372070312, 1.0176204833984375, 1.0173767700195313, 1.0178017578125, 1.0179000244140626, 1.0172119140625, 1.0165678100585938, 1.0165330200195313, 1.0163292236328125, 1.0167285766601561, 1.0169968872070312, 1.0166988525390626, 1.0169210815429688, 1.0170480346679687, 1.0171238403320313, 1.0167817993164063, 1.0167633666992189, 1.0170337524414061, 1.0168043823242188, 1.0171392211914063, 1.017153564453125, 1.0167838745117188, 1.0169251708984375, 1.0174423217773438, 1.01701220703125, 2.113585205078125, 1.0164449462890626, 1.016585205078125, 
1.0164551391601562, 1.01684326171875, 1.016680419921875, 1.016753173828125, 1.0173470458984375, 1.0173030395507812, 1.0169528198242188, 1.0168330078125, 1.0170194091796876, 1.01732763671875, 1.0167736206054687, 1.0168514404296876, 1.0169835815429686, 1.0166835327148438, 1.01719140625, 1.0171043701171876, 1.0166845703125, 1.017260009765625, 1.0176849975585938, 1.0174935302734376, 1.017529296875, 1.0172733154296876, 1.0169231567382813, 1.0173982543945312, 1.0170020141601563, 1.016616943359375, 1.0167767333984374, 1.0169467163085937, 1.0172088623046875, 1.01715966796875, 1.016932373046875, 1.0170357666015626, 1.0171320190429687, 1.01718017578125, 1.0174566650390624, 1.0167255249023437, 1.0167971801757814, 1.0167633666992189, 1.0170337524414061, 1.0170050659179688, 1.0170439453125, 1.01673779296875, 1.0168330078125, 1.0170828857421874, 1.017059326171875, 1.0168739624023437, 1.0171300048828125, 1.0171238403320313, 1.016974365234375, 1.0169026489257813, 1.0171504516601562, 1.0168207397460938, 1.0168319702148438, 1.0172692260742187, 1.0173624877929688, 1.016796142578125, 1.0168391723632813, 1.0171893920898438, 1.017365478515625, 1.016943603515625, 2.1139599609375, 1.0168893432617188, 1.0171945190429688, 1.016859619140625, 1.0165811157226563, 1.0167500610351563, 1.01697021484375, 1.016826904296875, 1.0168453369140624, 1.0170132446289062, 1.0168053588867187, 1.0173511962890625, 1.01734912109375, 1.0167859497070313, 1.0169036865234375, 1.0171238403320313, 1.0169866333007813, 1.0171996459960937, 1.0168934326171875, 1.0169763793945312, 1.0167183227539063, 1.0170234985351563, 1.0178928833007812, 1.0167654418945313, 1.0169620361328124, 1.0170040283203126, 1.0167449340820311, 1.0173562622070313, 1.016680419921875, 1.0172979125976562, 1.0165924072265624, 1.017101318359375, 1.0172999877929687, 1.0170224609375, 1.0172262573242188, 1.0168402099609375, 1.0170101928710937, 1.0167879638671875, 1.01705419921875, 1.0175529174804687, 1.0173265991210938, 1.01718017578125, 1.0174719848632812, 1.0170245361328125, 1.0174248657226563, 1.016958984375, 1.0166087646484374, 1.0166466674804688, 1.0171975708007812, 1.01707470703125, 1.0169210815429688, 1.0175795288085938, 1.0173245239257813, 1.0168084716796875, 1.0169722900390625, 1.0170470581054687, 1.0169948120117187, 1.0169896850585938, 1.0172006225585937, 1.0173716430664062, 1.0176337890625, 1.0176676025390625, 1.0172323608398437]",tokens/s,0.9681516343515072,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-2b,google/gemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) 
ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc2f6-458a30d035f83012668160d9;54c3648e-4b81-46b2-bdbf-6538583da24c) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/gemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-7B,Qwen/Qwen-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-13b-hf,meta-llama/Llama-2-13b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2986.500096,9259.450368,0.0,8613.003264,8211.364864,s,10,10.947190917968749,1.094719091796875,0.002010850578895603,1.0950899047851563,1.0971980224609374,1.0974085205078123,1.0975769189453124,"[1.0976190185546875, 1.096309326171875, 1.0916474609375, 1.09289697265625, 1.0933328857421876, 1.0920892333984376, 1.0948057861328124, 1.0953740234375, 1.0959649658203126, 1.0971512451171874]",tokens/s,233.8499455415552,kWh,1.291276744670338e-05,7.073689176177139e-06,6.076365972199904e-05,8.075011634487956e-05,tokens/kWh,3170274.070029042,MB,2986.500096,9330.753536,0.0,8684.306432,8503.627264,s,10,640.9193046874999,64.09193046875,0.016181839642145233,64.08713867187501,64.11070859375,64.119698046875,64.126889609375,"[64.0750625, 64.08454296875, 64.0822421875, 64.076171875, 64.07810546875, 64.09387890625, 64.10216796875, 64.089734375, 64.1087109375, 64.1286875]",tokens/s,0.9829630585197867,kWh,0.0007566528167658381,0.00041471410911453853,0.003539824637413,0.004711191563293376,tokens/kWh,13372.413147208052,,s,629,649.7487822875972,1.0329869352743999,0.1300160071749506,1.0172456665039062,1.0179758178710938,1.0183352172851563,2.110224638671875,"[1.016637451171875, 1.0170296020507812, 1.017143310546875, 1.0172344360351562, 1.0165718994140625, 1.016827880859375, 1.0167459716796876, 1.0169722900390625, 1.0170675048828124, 1.01732861328125, 1.0169548950195313, 1.0172835693359374, 1.0170890502929688, 1.01718017578125, 1.016700927734375, 1.0168627319335937, 1.016974365234375, 1.01762353515625, 1.0176194458007812, 1.0171002807617187, 1.0172579956054688, 1.0172610473632813, 1.01688525390625, 1.0172958984375, 1.016853515625, 1.0170009765625, 1.0169313354492187, 1.0170941162109375, 1.0171217651367188, 1.0168176879882813, 1.016890380859375, 1.0176798706054688, 1.0172395629882813, 1.017396240234375, 1.0169989013671874, 1.0170675048828124, 1.0171791381835937, 1.0177362060546875, 1.0171443481445313, 1.0171986083984375, 1.017133056640625, 1.0175907592773437, 1.01686376953125, 1.0169241333007812, 1.0167664794921876, 1.0166435546875, 1.0166865844726563, 1.01715966796875, 1.0166220703125, 1.0169251708984375, 1.0169763793945312, 1.0173429565429688, 1.016700927734375, 1.0169528198242188, 1.016858642578125, 1.0169600219726562, 1.0173624267578125, 1.0173972778320313, 1.0168811645507811, 1.0170654907226562, 1.0171514892578124, 1.0170183715820313, 2.113563720703125, 1.0165084228515624, 1.0165995483398438, 1.01704296875, 1.0168893432617188, 1.0168678588867188, 1.0165933837890626, 1.01682275390625, 1.0167122192382811, 1.0169712524414063, 1.017650146484375, 1.0175662231445313, 1.0175538940429687, 1.0177986450195313, 1.0178928833007812, 1.016896484375, 1.0167449340820311, 1.0172692260742187, 1.0168688354492188, 1.0166507568359375, 1.0167510986328125, 
1.0167859497070313, 1.0173839111328125, 1.0172303466796875, 1.0169999389648436, 1.0167675170898438, 1.0172876586914064, 1.0169815063476562, 1.0168299560546874, 1.0169784545898437, 1.0168616943359374, 1.0168115234375, 1.0175344848632812, 1.0173378295898436, 1.0172190551757812, 1.017038818359375, 1.0170572509765625, 1.01707568359375, 1.0168831787109376, 1.0171309814453124, 1.01711669921875, 1.01686376953125, 1.017228271484375, 1.0172507934570312, 1.01673779296875, 1.0168883056640625, 1.0183065795898438, 1.0170347290039063, 1.0172979125976562, 1.0179051513671875, 1.0176614379882813, 1.017459716796875, 1.017312255859375, 1.0175733642578124, 1.01785498046875, 1.0175313720703125, 1.0179799194335937, 1.0179594116210937, 1.01766552734375, 1.017586669921875, 1.017523193359375, 1.0172456665039062, 1.0174935302734376, 2.110212158203125, 1.01697021484375, 1.0170562744140625, 1.0166824951171876, 1.017027587890625, 1.0168463134765624, 1.0169794311523437, 1.0168719482421875, 1.0174310302734375, 1.0172272338867188, 1.0172548828125, 1.0166978759765626, 1.0173173828125, 1.0170009765625, 1.0174668579101562, 1.0170675048828124, 1.0172139282226562, 1.017228271484375, 1.0174627685546875, 1.0169548950195313, 1.0171443481445313, 1.0174464111328125, 1.0173347778320312, 1.0173265991210938, 1.0174474487304688, 1.0170040283203126, 1.0171893920898438, 1.0172518310546874, 1.0172119140625, 1.0169682006835938, 1.0171197509765626, 1.0175057983398437, 1.017186279296875, 1.017196533203125, 1.0173778076171875, 1.016795166015625, 1.0170050659179688, 1.0167817993164063, 1.017259033203125, 1.0169620361328124, 1.0171812133789062, 1.0173296508789063, 1.0171238403320313, 1.0170531616210938, 1.0173737182617189, 1.0171729736328126, 1.0174576416015626, 1.01732861328125, 1.0171401977539063, 1.0170521850585938, 1.0174976196289063, 1.017228271484375, 1.0171412353515625, 1.0169722900390625, 1.0173092041015626, 1.0171893920898438, 1.0174105834960938, 1.0172794799804687, 1.0174299926757813, 1.0173112182617188, 1.017143310546875, 1.0173870239257812, 1.0169405517578125, 2.1099970703125, 1.0165718994140625, 1.0168822021484376, 1.0171340942382812, 1.0167890014648437, 1.0165309448242188, 1.0165729370117187, 1.0171259155273438, 1.0168145751953126, 1.0167357177734375, 1.0166978759765626, 1.0165995483398438, 1.017038818359375, 1.017069580078125, 1.0168657836914063, 1.0166671142578125, 1.0169508056640626, 1.0168002319335938, 1.0168309936523436, 1.01686376953125, 1.0169978637695312, 1.0169108276367187, 1.0175303955078125, 1.017259033203125, 1.0170203857421876, 1.017275390625, 1.0176215209960937, 1.0172620849609375, 1.0172682495117187, 1.0177730712890625, 1.017175048828125, 1.0169896850585938, 1.0171904296875, 1.0173450317382813, 1.0171514892578124, 1.017196533203125, 1.0175733642578124, 1.0169241333007812, 1.0169774169921875, 1.016858642578125, 1.0170009765625, 1.01692724609375, 1.0172477416992187, 1.0172241821289063, 1.0172979125976562, 1.017185302734375, 1.0175098876953126, 1.0170951538085937, 1.0170142822265624, 1.016974365234375, 1.0171340942382812, 1.0168934326171875, 1.0171658325195312, 1.0174044189453124, 1.0170419311523438, 1.0170009765625, 1.01764404296875, 1.017259033203125, 1.0177556762695312, 1.0170634155273437, 1.0172395629882813, 1.0169517822265626, 1.0172119140625, 2.11085009765625, 1.0167725830078125, 1.017186279296875, 1.0167930908203124, 1.0171504516601562, 1.0169845581054688, 1.0168934326171875, 1.0167664794921876, 1.0167285766601561, 1.016658935546875, 1.0167992553710938, 1.0168207397460938, 1.0168862915039063, 1.016806396484375, 
1.0169343872070313, 1.01682177734375, 1.0170880126953126, 1.0174935302734376, 1.0173870239257812, 1.0168729858398438, 1.016933349609375, 1.0174105834960938, 1.0178846435546876, 1.0173112182617188, 1.017037841796875, 1.0165770263671876, 1.0169405517578125, 1.0169047241210938, 1.0172682495117187, 1.017038818359375, 1.0169456787109374, 1.0170654907226562, 1.0172426147460938, 1.01705419921875, 1.0172129516601562, 1.0170951538085937, 1.0170245361328125, 1.017217041015625, 1.0174197998046874, 1.016669189453125, 1.016816650390625, 1.0169917602539063, 1.0169262084960937, 1.0170654907226562, 1.0176153564453125, 1.0173839111328125, 1.0169313354492187, 1.017007080078125, 1.0170951538085937, 1.0168402099609375, 1.0171781005859375, 1.0170706176757813, 1.017101318359375, 1.0171300048828125, 1.0170357666015626, 1.016890380859375, 1.0173296508789063, 1.0192701416015626, 1.0171207885742188, 1.0169364624023438, 1.0173163452148437, 1.0173982543945312, 1.0175610961914063, 2.1102294921875, 1.0168330078125, 1.0173009643554687, 1.0168422241210937, 1.0167633666992189, 1.016748046875, 1.017270263671875, 1.0175191040039062, 1.0172374877929689, 1.0172579956054688, 1.0169036865234375, 1.01722314453125, 1.0172927856445313, 1.0177249145507812, 1.01745458984375, 1.0174157104492187, 1.0179625244140624, 1.0174013671875, 1.017365478515625, 1.0170818481445312, 1.016953857421875, 1.0169129028320312, 1.01753857421875, 1.0170408935546875, 1.017248779296875, 1.01697021484375, 1.0176993408203125, 1.0169528198242188, 1.0168002319335938, 1.0170091552734375, 1.0168760375976562, 1.0173685913085937, 1.0181334838867186, 1.0179051513671875, 1.0173931274414063, 1.0176051025390624, 1.0173521728515624, 1.0176522216796875, 1.0186475219726563, 1.017776123046875, 1.0168862915039063, 1.0169896850585938, 1.0183987426757812, 1.01768603515625, 1.018186767578125, 1.017185302734375, 1.0171566162109376, 1.0170480346679687, 1.0171371459960938, 1.0170992431640624, 1.0172692260742187, 1.0174402465820314, 1.0176215209960937, 1.0168811645507811, 1.0169937744140625, 1.017322509765625, 1.0178201904296875, 1.0176041259765625, 1.0179737548828125, 1.017111572265625, 1.017365478515625, 1.0178099365234374, 1.0178980102539064, 2.113080322265625, 1.0174884033203124, 1.0172631225585937, 1.0174566650390624, 1.0172620849609375, 1.0167183227539063, 1.0171514892578124, 1.017007080078125, 1.017343994140625, 1.0170880126953126, 1.016985595703125, 1.0176143188476563, 1.0176163940429688, 1.0170951538085937, 1.0172026977539061, 1.0171873168945313, 1.0176123046875, 1.017691162109375, 1.0170316772460937, 1.0167756958007812, 1.0169569091796875, 1.0173972778320313, 1.0178017578125, 1.0174781494140626, 1.0178662109375, 1.0171248779296875, 1.017670654296875, 1.0176788330078126, 1.0176256103515624, 1.0175354614257812, 1.0175897827148437, 1.0182471923828125, 1.01829736328125, 1.0176644897460938, 1.018271728515625, 1.0174146728515625, 1.0175713500976562, 1.0185062255859374, 1.0173245239257813, 1.0170501098632811, 1.0175221557617187, 1.0181396484375, 1.01762353515625, 1.0171248779296875, 1.0179276733398437, 1.0172764282226563, 1.0176091918945311, 1.0178416748046875, 1.0178385620117187, 1.0172415771484375, 1.0178160400390626, 1.0175958862304688, 1.0175139770507813, 1.0184058837890626, 1.0174474487304688, 1.0172610473632813, 1.0176348266601563, 1.0172989501953125, 1.0170726318359375, 1.0170726318359375, 1.0176849975585938, 1.0169343872070313, 1.0174033813476562, 2.112878662109375, 1.016953857421875, 1.0167767333984374, 1.0172262573242188, 1.017343994140625, 1.0176256103515624, 
1.017802734375, 1.0175139770507813, 1.0169364624023438, 1.0173214721679686, 1.0175713500976562, 1.0171924438476563, 1.0175733642578124, 1.01711767578125, 1.0170449829101562, 1.0170040283203126, 1.0178385620117187, 1.0171453247070312, 1.0173972778320313, 1.0172129516601562, 1.0172528686523437, 1.0171781005859375, 1.0176245727539062, 1.01760205078125, 1.0175027465820312, 1.01718017578125, 1.017638916015625, 1.017111572265625, 1.0172241821289063, 1.0173060913085938, 1.017427978515625, 1.0172743530273438, 1.0173153076171875, 1.0171094970703125, 1.0171586303710938, 1.0175293579101563, 1.0171494140625, 1.0172057495117188, 1.0173634643554688, 1.0171945190429688, 1.0169733276367188, 1.0171525268554686, 1.0175272827148438, 1.0170572509765625, 1.0170685424804689, 1.0172467041015625, 1.01697021484375, 1.0168197021484375, 1.0174822387695313, 1.0170521850585938, 1.0171043701171876, 1.0171340942382812, 1.0171760864257813, 1.0169978637695312, 1.01732763671875, 1.0172794799804687, 1.0176839599609375, 1.017469970703125, 1.0180280151367187, 1.0174791870117188, 1.017849853515625, 1.0173368530273437, 1.01711669921875, 2.113290283203125, 1.017007080078125, 1.0168678588867188, 1.01684326171875, 1.0171268920898437, 1.016648681640625, 1.0167285766601561, 1.0171514892578124, 1.0172190551757812, 1.0169129028320312, 1.0168606567382812, 1.0169886474609375, 1.0172241821289063, 1.0173921508789063, 1.0177730712890625, 1.016896484375, 1.0171392211914063, 1.0172139282226562, 1.0171043701171876, 1.0171217651367188, 1.0177310791015626, 1.01746484375, 1.017628662109375, 1.017260009765625, 1.0183670043945312, 1.0173040771484374, 1.0178580322265625, 1.0175846557617187, 1.01725390625, 1.01791845703125, 1.0185062255859374, 1.0173634643554688, 1.0173931274414063, 1.0182645874023437, 1.0179307250976561, 1.0173163452148437, 1.0178989868164063, 1.0175098876953126, 1.0173235473632813, 1.0175590209960939, 1.01777099609375, 1.0175580444335937, 1.0173604125976563, 1.0171300048828125, 1.0181734619140625, 1.0173685913085937, 1.0175887451171874, 1.01719140625, 1.017218017578125, 1.0171586303710938, 1.0180546264648438, 1.0181591186523438, 1.0179061889648438, 1.0179747924804687, 1.0184212646484374, 1.0185728149414062, 1.0190858154296876, 1.0186967163085938, 1.0185779418945313, 1.0188789672851561, 1.0184888305664062, 1.0182215576171876, 1.01819189453125, 2.114828369140625, 1.0173347778320312, 1.0177095947265624, 1.0177669067382813, 1.0172406005859376, 1.0176143188476563, 1.017554931640625, 1.0172661743164062, 1.0174033813476562, 1.0175989990234375, 1.0177607421875, 1.0175191040039062, 1.0183218994140626, 1.0180669555664061, 1.0179829711914063, 1.0176931762695312, 1.0172927856445313, 1.0174801635742188, 1.0174003295898437, 1.0175467529296875, 1.0174320678710937, 1.0178211669921875, 1.0185543823242187, 1.0174586791992188, 1.0178580322265625, 1.0182103271484375, 1.0180054931640625, 1.0179921875, 1.0187222900390625, 1.0177330932617188, 1.0182236328125, 1.0179921875, 1.0184345703125, 1.0186956787109376, 1.018313720703125, 1.0182072143554688, 1.0180147094726562, 1.0174085083007813, 1.01800244140625, 1.0174351196289062, 1.0173193969726562, 1.0178539428710938, 1.0183413696289063, 1.0178672485351563, 1.0183229370117188, 1.0190479125976561, 1.01767578125, 1.0170265502929687, 1.0175949096679688, 1.0170900268554688, 1.0174924926757813, 1.0183259887695313, 1.0185646362304688, 1.0176317138671875, 1.0183250122070313, 1.0178457641601562, 1.018119140625, 1.01793994140625, 1.0190244140625, 1.0184181518554687, 1.0175703125, 1.0174607543945313, 
1.0177362060546875]",tokens/s,0.9680664545233214,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-4B,Qwen/Qwen1.5-4B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,3149.217792,5128.060928,0.0,4481.613824,4276.256768,s,10,3.149311706542969,0.3149311706542969,0.0015717060185626956,0.31493319702148437,0.3165461700439453,0.3169147476196289,0.31720960968017575,"[0.3172833251953125, 0.3164642639160156, 0.3132740478515625, 0.3144434814453125, 0.31399490356445314, 0.3119559020996094, 0.3140972900390625, 0.31542291259765626, 0.3162637023925781, 0.31611187744140623]",tokens/s,812.8760308741042,kWh,3.694201881686847e-06,2.024221956560268e-06,1.6665230346062446e-05,2.238365418430956e-05,tokens/kWh,11436917.220578322,MB,3149.217792,5128.060928,0.0,4481.613824,4465.661952,s,10,184.204947265625,18.420494726562502,0.008042192397515377,18.4165078125,18.4298185546875,18.432060644531248,18.433854316406247,"[18.41758203125, 18.4293203125, 18.415013671875, 18.434302734375, 18.41543359375, 18.427755859375, 18.427966796875, 18.41258203125, 18.409763671875, 18.4152265625]",tokens/s,3.420103582188457,kWh,0.0002180770655886995,0.00011952181134027568,0.0009648250270677349,0.00130242390399671,tokens/kWh,48371.3480739057,,s,629,186.7401061401367,0.2968841115105512,0.03741416455160097,0.29225473022460935,0.29295676879882815,0.29340855712890623,0.6063264038085938,"[0.29236837768554685, 0.29334426879882813, 0.29221682739257815, 0.29205914306640623, 0.2920222778320313, 0.2922946472167969, 0.2924492797851562, 0.2926929931640625, 0.29193624877929686, 0.2920509338378906, 0.29204376220703127, 0.2921123962402344, 0.291873779296875, 0.2915205078125, 0.29172222900390626, 0.29199871826171875, 0.2923089904785156, 0.2930401306152344, 0.2921902160644531, 0.29266842651367186, 0.29187994384765625, 0.2924564514160156, 0.2924277648925781, 0.2920263671875, 0.29210009765625, 0.2922608642578125, 0.2919096374511719, 0.29193624877929686, 0.29192294311523437, 0.29198541259765626, 0.29212261962890623, 0.29261724853515625, 0.2921973571777344, 0.29344256591796875, 0.2920785827636719, 0.2922250366210937, 0.29242059326171876, 0.2925332336425781, 0.2925537414550781, 0.29240524291992187, 0.2921902160644531, 0.2919741516113281, 0.29227825927734374, 0.2922506103515625, 0.2918768615722656, 0.29220248413085936, 0.29238067626953124, 0.29187481689453126, 0.29204788208007815, 0.2923376770019531, 0.2920396728515625, 0.29325619506835937, 0.2925700988769531, 0.29266226196289064, 0.29220452880859377, 0.2922823791503906, 0.2919710693359375, 0.29259878540039064, 0.2924472351074219, 0.29214208984375, 0.2923765869140625, 0.2920519714355469, 0.6092022705078125, 0.29223724365234377, 0.2937958374023438, 0.29247589111328126, 0.29319781494140623, 0.2925875244140625, 0.2924288024902344, 0.29230181884765627, 0.29243902587890624, 0.2923919372558594, 0.2927564697265625, 0.29259161376953124, 0.29243084716796874, 0.29296743774414064, 0.2923427734375, 0.29186456298828123, 0.2922250366210937, 0.29213082885742186, 0.29205810546875, 
0.2923417663574219, 0.2929213562011719, 0.2921850891113281, 0.2936012878417969, 0.29257931518554686, 0.29359002685546876, 0.29242059326171876, 0.2921574401855469, 0.2920642700195312, 0.2926080017089844, 0.29214617919921876, 0.2923816833496094, 0.29216973876953123, 0.29204684448242185, 0.2921922607421875, 0.2923284606933594, 0.2922137451171875, 0.2923929748535156, 0.292546630859375, 0.2926417236328125, 0.2925137939453125, 0.2924062805175781, 0.2922659912109375, 0.29193319702148435, 0.29348370361328124, 0.29205792236328126, 0.2921850891113281, 0.2963548278808594, 0.292305908203125, 0.2925137939453125, 0.2923642883300781, 0.2921707458496094, 0.2921430969238281, 0.2926448669433594, 0.2927411193847656, 0.29220761108398435, 0.292232177734375, 0.29215130615234375, 0.2934067077636719, 0.29257626342773435, 0.29231924438476564, 0.2922465209960938, 0.2922557373046875, 0.2923213195800781, 0.6058434448242187, 0.2923243408203125, 0.29236737060546875, 0.2920919189453125, 0.2922966918945312, 0.2922536926269531, 0.2924298095703125, 0.29252813720703125, 0.29234893798828127, 0.2925455322265625, 0.2922403869628906, 0.29219635009765627, 0.2920130615234375, 0.29205810546875, 0.2921932678222656, 0.2922680358886719, 0.2919772033691406, 0.29201919555664063, 0.29235302734375, 0.2921318359375, 0.29199154663085936, 0.2918656005859375, 0.29201611328125, 0.294361083984375, 0.2921062316894531, 0.29225473022460935, 0.2920929260253906, 0.2933616638183594, 0.29293466186523437, 0.29222708129882813, 0.292073486328125, 0.29259878540039064, 0.2922178649902344, 0.29183078002929685, 0.29233355712890624, 0.2924349365234375, 0.291989501953125, 0.29248818969726564, 0.29223526000976563, 0.29176217651367187, 0.2918082580566406, 0.2931097717285156, 0.29253836059570315, 0.29301043701171875, 0.2920130615234375, 0.2920407409667969, 0.2919710388183594, 0.29457406616210935, 0.2923519897460938, 0.2923028564453125, 0.2920929260253906, 0.2918901672363281, 0.29224755859375, 0.2919526672363281, 0.291937255859375, 0.29211032104492185, 0.29193011474609376, 0.29155224609375, 0.2918410339355469, 0.2920980529785156, 0.292168701171875, 0.29260595703125, 0.292453369140625, 0.6075504760742187, 0.2925127563476563, 0.2923765869140625, 0.29210931396484374, 0.29227825927734374, 0.2921656188964844, 0.29187890625, 0.29214105224609377, 0.29254144287109374, 0.29202023315429687, 0.29206732177734374, 0.293718017578125, 0.2936227722167969, 0.29316915893554685, 0.29292236328125, 0.29260595703125, 0.292431884765625, 0.2927585144042969, 0.2929725341796875, 0.29310772705078125, 0.29271142578125, 0.2924615783691406, 0.29305242919921876, 0.29276776123046877, 0.2925915832519531, 0.29245440673828127, 0.29250765991210936, 0.2925107116699219, 0.29247589111328126, 0.29257830810546875, 0.29249331665039063, 0.2926243896484375, 0.2935541687011719, 0.2926243896484375, 0.29202532958984373, 0.2919198608398437, 0.292021240234375, 0.29210418701171875, 0.2919884948730469, 0.2921758728027344, 0.2921983947753906, 0.292052978515625, 0.2918523254394531, 0.2925229797363281, 0.2951219177246094, 0.2927698059082031, 0.2923653259277344, 0.29271551513671873, 0.2925813903808594, 0.29277389526367187, 0.2923816833496094, 0.2925250549316406, 0.2931272277832031, 0.2930205993652344, 0.29253631591796875, 0.2923786315917969, 0.2930882568359375, 0.2928087158203125, 0.29209906005859376, 0.2928845520019531, 0.29307998657226564, 0.2928609313964844, 0.2925823974609375, 0.6065142211914063, 0.2926080322265625, 0.29234066772460937, 0.29249432373046874, 0.2920980529785156, 0.2918707275390625, 
0.2923858032226562, 0.2918133850097656, 0.2919024963378906, 0.2923048706054687, 0.29231924438476564, 0.29207757568359377, 0.29218612670898436, 0.29211032104492185, 0.29206936645507814, 0.29310873413085936, 0.2925721740722656, 0.2921850891113281, 0.292738037109375, 0.2921891784667969, 0.2920550537109375, 0.2918143920898438, 0.2921983947753906, 0.29208370971679687, 0.29210214233398435, 0.29210214233398435, 0.2925189208984375, 0.2919208984375, 0.29203353881835936, 0.2927615966796875, 0.291736572265625, 0.29209701538085936, 0.2920550537109375, 0.2918656005859375, 0.2918370056152344, 0.2924051818847656, 0.2920263671875, 0.2931199951171875, 0.29166488647460936, 0.29208370971679687, 0.2922158203125, 0.2921277465820312, 0.2917294006347656, 0.29210012817382813, 0.29211541748046876, 0.29188607788085935, 0.29205810546875, 0.29206732177734374, 0.29190142822265625, 0.2926776428222656, 0.29242471313476565, 0.2921656188964844, 0.29246054077148437, 0.2923315124511719, 0.29213287353515627, 0.2920325012207031, 0.2924195861816406, 0.29780581665039063, 0.29234072875976563, 0.2921574401855469, 0.29292031860351564, 0.2927329406738281, 0.2922823791503906, 0.6048440551757812, 0.29242266845703124, 0.29194342041015625, 0.2922823791503906, 0.29246875, 0.2921922607421875, 0.2924472351074219, 0.2927779846191406, 0.29216461181640624, 0.29211953735351565, 0.29237759399414065, 0.29223526000976563, 0.2919372863769531, 0.2918973388671875, 0.2923294982910156, 0.2918020935058594, 0.29216152954101565, 0.2923212890625, 0.29266021728515623, 0.2927503356933594, 0.2927216491699219, 0.29210418701171875, 0.2921973876953125, 0.29242471313476565, 0.29217279052734374, 0.29210931396484374, 0.2921912231445313, 0.292021240234375, 0.29214617919921876, 0.29203866577148435, 0.29192190551757813, 0.2918973388671875, 0.29224346923828126, 0.291884033203125, 0.291857421875, 0.29202532958984373, 0.2921379699707031, 0.2925025329589844, 0.2959308776855469, 0.2924974060058594, 0.2935582580566406, 0.2933534851074219, 0.2929541015625, 0.29257931518554686, 0.2927984619140625, 0.29274725341796876, 0.29252197265625, 0.29275750732421874, 0.2934097900390625, 0.29243289184570315, 0.2923284606933594, 0.2925189208984375, 0.2929377136230469, 0.29240524291992187, 0.2938306579589844, 0.29293670654296877, 0.29282302856445314, 0.29310772705078125, 0.2923100280761719, 0.2923991088867188, 0.29276568603515624, 0.2923294677734375, 0.2927698059082031, 0.6069278564453126, 0.2922486267089844, 0.29226181030273435, 0.29200384521484374, 0.29218099975585937, 0.2922465209960938, 0.29204376220703127, 0.2927329406738281, 0.2925977478027344, 0.292210693359375, 0.29327462768554685, 0.2930093994140625, 0.29313638305664064, 0.29303192138671874, 0.292389892578125, 0.29288653564453127, 0.29291519165039065, 0.29289266967773436, 0.2928721923828125, 0.292701171875, 0.29249331665039063, 0.29188607788085935, 0.29253631591796875, 0.29483929443359375, 0.2933084106445312, 0.29292849731445314, 0.2923530883789062, 0.29386029052734375, 0.29232742309570314, 0.2925465698242187, 0.29262130737304687, 0.292274169921875, 0.2924564514160156, 0.292242431640625, 0.2929407958984375, 0.2920704040527344, 0.2924810791015625, 0.292454345703125, 0.2922188720703125, 0.29238885498046874, 0.29204888916015626, 0.2919598083496094, 0.2920232849121094, 0.2920048522949219, 0.29257220458984373, 0.29197512817382815, 0.2930882568359375, 0.29200384521484374, 0.292384765625, 0.2927626342773437, 0.2920519714355469, 0.2919045104980469, 0.2923561096191406, 0.29254348754882814, 0.2925537414550781, 0.2922148132324219, 
0.292211669921875, 0.2919024658203125, 0.29274008178710936, 0.29211032104492185, 0.29219430541992186, 0.2925619201660156, 0.2921574401855469, 0.6078638305664062, 0.2920642700195312, 0.29204171752929686, 0.29206219482421875, 0.2920550537109375, 0.2921185302734375, 0.29250149536132813, 0.29204888916015626, 0.29186456298828123, 0.2920867919921875, 0.29199871826171875, 0.29200689697265625, 0.29195059204101564, 0.2919956359863281, 0.2926612548828125, 0.291999755859375, 0.29500723266601564, 0.2922413940429687, 0.2919342041015625, 0.2922650146484375, 0.2921286926269531, 0.29177548217773436, 0.2920263671875, 0.29223629760742187, 0.2919342041015625, 0.2920151062011719, 0.29193624877929686, 0.29171505737304687, 0.29255679321289063, 0.29226290893554685, 0.29204888916015626, 0.29261932373046873, 0.2921389465332031, 0.2924472351074219, 0.292569091796875, 0.29272988891601565, 0.2924666442871094, 0.29255474853515623, 0.2924288024902344, 0.29235711669921877, 0.29266842651367186, 0.2926305236816406, 0.292279296875, 0.29281585693359374, 0.29269195556640626, 0.2922127380371094, 0.2923765869140625, 0.29226190185546874, 0.29332275390625, 0.29225677490234375, 0.2925455322265625, 0.29204684448242185, 0.2919465026855469, 0.29239501953125, 0.292094970703125, 0.2916546630859375, 0.29185330200195314, 0.2919178161621094, 0.29201202392578124, 0.2919045104980469, 0.2917683715820312, 0.29191879272460936, 0.29268173217773436, 0.609132568359375, 0.2924984436035156, 0.2927001647949219, 0.29203353881835936, 0.2919598083496094, 0.29245440673828127, 0.29231411743164065, 0.2920304565429688, 0.29216973876953123, 0.2918604736328125, 0.2920816650390625, 0.29198541259765626, 0.29182769775390627, 0.29221478271484375, 0.29226290893554685, 0.29183078002929685, 0.2922137451171875, 0.2939207763671875, 0.29266226196289064, 0.292632568359375, 0.2929541015625, 0.29197210693359377, 0.2920867919921875, 0.29224346923828126, 0.2920704040527344, 0.292126708984375, 0.2921769104003906, 0.2920243225097656, 0.292173828125, 0.2919126892089844, 0.2919823303222656, 0.2920867919921875, 0.2925608825683594, 0.29213287353515627, 0.29228546142578127, 0.29201608276367186, 0.2929305725097656, 0.2924963989257813, 0.2920181884765625, 0.2923519897460938, 0.29213082885742186, 0.2921359252929687, 0.29183795166015625, 0.29199258422851565, 0.2920724487304687, 0.29192807006835936, 0.292031494140625, 0.2920294494628906, 0.292173828125, 0.2920765380859375, 0.29200689697265625, 0.2918758544921875, 0.2921349182128906, 0.29268896484375, 0.2934025573730469, 0.292505615234375, 0.2919270324707031, 0.29225164794921876, 0.292105224609375, 0.29204071044921875, 0.2915840148925781, 0.2917908630371094, 0.2920570983886719, 0.6085621948242188, 0.2921451416015625, 0.29239706420898437, 0.29239501953125, 0.29228955078125, 0.2920243225097656, 0.2924892272949219, 0.2920345458984375, 0.2921123962402344, 0.291962890625, 0.2920796203613281, 0.29241549682617185, 0.29221682739257815, 0.2918553466796875, 0.291915771484375, 0.29186663818359376, 0.2920427551269531, 0.2920980529785156, 0.29218817138671876, 0.29189529418945315, 0.29189837646484373, 0.29198028564453127, 0.29211953735351565, 0.29269195556640626, 0.2930841674804687, 0.2925148010253906, 0.29509222412109376, 0.29249127197265623, 0.2924369812011719, 0.29232537841796874, 0.2920162048339844, 0.29203448486328126, 0.29213900756835937, 0.2920243225097656, 0.29217279052734374, 0.2922342529296875, 0.2917130126953125, 0.2923991088867188, 0.29214718627929687, 0.2926458740234375, 0.2934947814941406, 0.2932357177734375, 0.2925066223144531, 
0.2929162292480469, 0.29218817138671876, 0.2923735046386719, 0.2922332153320312, 0.2920325012207031, 0.2919761962890625, 0.29228543090820314, 0.29216461181640624, 0.29214718627929687, 0.29235302734375, 0.292274169921875, 0.2920376281738281, 0.29199154663085936, 0.2923724670410156, 0.2924646301269531, 0.29217279052734374, 0.29284454345703126, 0.29187277221679686, 0.2919321594238281, 0.29226495361328125]",tokens/s,3.368317674233167,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-410m,EleutherAI/pythia-410m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-7b,huggyllama/llama-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2013.822976,5480.382464,0.0,4833.93536,4503.282688,s,10,5.711301574707031,0.5711301574707031,0.0015604684158887843,0.5710088500976562,0.5724867248535156,0.573356430053711,0.5740521942138671,"[0.5715228271484375, 0.5742261352539062, 0.5684718017578125, 0.5703966064453125, 0.570494873046875, 0.5693699340820313, 0.5704375610351563, 0.5720609130859375, 0.57229345703125, 0.5720274658203125]",tokens/s,448.2340787846278,kWh,6.721677934681928e-06,3.683185084507083e-06,3.147118258432934e-05,4.187604560351835e-05,tokens/kWh,6113280.189438215,MB,2014.928896,5480.382464,0.0,4833.93536,4688.699392,s,10,334.74296874999993,33.474296875,0.0037315247196552594,33.473017578124995,33.479158984375,33.4801732421875,33.4809846484375,"[33.47309765625, 33.47263671875, 33.4715390625, 33.4729375, 33.47534765625, 33.47058984375, 33.47893359375, 33.4811875, 33.4778203125, 33.46887890625]",tokens/s,1.8820410249468458,kWh,0.00039524016447641235,0.00021662581418956808,0.0018242358853134645,0.002436101863979445,tokens/kWh,25860.987560301615,,s,629,339.3508396606445,0.5395084891266209,0.06790528508400501,0.531294189453125,0.5317521484375,0.5319190551757812,1.1017576318359374,"[0.5316085815429688, 0.5315625, 0.5310873413085937, 0.5312276611328125, 0.5310341186523437, 0.5311795043945312, 0.5312860107421875, 0.53180517578125, 0.5313863525390625, 0.5315205078125, 0.5309071655273437, 0.5309808349609375, 0.5307955322265625, 0.5312429809570313, 0.530924560546875, 0.5313556518554687, 0.5308876953125, 0.531230712890625, 0.5310802001953125, 0.5317345581054688, 0.5312921752929688, 0.5313382568359375, 0.5308221435546875, 0.5310607299804687, 0.5308549194335938, 0.5315635375976563, 0.5311006469726562, 0.5312788696289062, 0.5312388916015625, 0.5312849731445313, 0.5308907470703125, 0.5311416015625, 0.5308528442382813, 0.5311743774414063, 0.531367919921875, 0.5315932006835937, 0.5311682739257813, 0.5317539672851562, 0.5317908325195313, 0.531473388671875, 0.53108837890625, 0.53146728515625, 0.531220458984375, 0.5315338134765625, 0.5310525512695312, 0.531900390625, 0.5317529296875, 0.5317069091796875, 0.53103515625, 0.53153076171875, 0.5311344604492187, 0.5311918334960938, 0.5308630981445313, 0.5311641845703124, 0.5309531860351563, 0.531577880859375, 0.5316065063476563, 0.5314017333984375, 0.5316864013671875, 0.5318963012695312, 0.5315604248046875, 0.53161279296875, 1.1062353515625, 0.5315419921875, 0.5316771850585937, 0.5312737426757812, 0.5311528930664062, 0.5314109497070313, 0.5312112426757812, 0.5310986328125, 0.5311057739257813, 0.53089892578125, 0.5317980346679687, 0.531167236328125, 0.53102490234375, 0.5308078002929687, 0.5314508666992187, 0.5309224853515625, 0.5310689086914062, 0.5308067626953125, 
0.5315430297851562, 0.5309972534179688, 0.5317294311523437, 0.5312696533203125, 0.5314498291015625, 0.5310535888671875, 0.5314406127929687, 0.5309706420898438, 0.5315850219726562, 0.5310156860351563, 0.5313699951171875, 0.5310637817382813, 0.5316474609375, 0.5311948852539062, 0.53110888671875, 0.53115185546875, 0.531330078125, 0.5310965576171875, 0.5311754150390625, 0.5309706420898438, 0.5319178466796874, 0.532337646484375, 0.5315972900390625, 0.531103759765625, 0.5311692504882812, 0.5309296875, 0.5313453979492188, 0.53089794921875, 0.532917236328125, 0.5309666137695312, 0.5310463256835938, 0.5309081420898437, 0.5315809326171875, 0.5311968994140625, 0.5312440185546875, 0.5310320434570313, 0.5312368774414062, 0.5308692626953125, 0.5317509155273438, 0.5314529418945313, 0.5314232177734375, 0.5311610717773437, 0.531356689453125, 0.5314232177734375, 0.5313045043945313, 1.1011102294921875, 0.5311477661132813, 0.5314263305664062, 0.531145751953125, 0.5313720092773437, 0.5311477661132813, 0.5315952758789062, 0.5309235229492187, 0.5311928100585938, 0.531162109375, 0.5311211547851562, 0.53140478515625, 0.5317939453125, 0.53121435546875, 0.5318656005859375, 0.531483642578125, 0.5315000610351562, 0.5312716674804687, 0.5315645141601563, 0.531324951171875, 0.5316557006835938, 0.5313607788085938, 0.5315901489257813, 0.5309573364257812, 0.5312061157226563, 0.5308528442382813, 0.5311078491210938, 0.5310279541015624, 0.5312245483398438, 0.5308385009765625, 0.5313310546875, 0.53131982421875, 0.5318225708007812, 0.5312051391601562, 0.5311702880859375, 0.5310709838867187, 0.5313167114257813, 0.5311600341796875, 0.5312184448242188, 0.531252197265625, 0.5313607788085938, 0.5311314086914063, 0.5312163696289063, 0.5310596923828125, 0.531451904296875, 0.5310115966796874, 0.5313280029296875, 0.53153076171875, 0.5314334716796875, 0.5314508666992187, 0.5315317993164063, 0.5316935424804687, 0.5310279541015624, 0.5312266235351563, 0.5312102661132813, 0.531051513671875, 0.5313505249023438, 0.5309439697265625, 0.53108837890625, 0.5311426391601562, 0.5314908447265625, 0.5311590576171875, 0.5316188354492187, 1.1020093994140625, 0.5310126342773438, 0.5312286987304687, 0.5310289916992188, 0.531694580078125, 0.5315460815429688, 0.5315625, 0.5309337768554687, 0.5313526000976563, 0.5313218383789062, 0.5313914794921875, 0.53136279296875, 0.5317498779296875, 0.5313587036132813, 0.5314078979492187, 0.531162109375, 0.5312286987304687, 0.531251220703125, 0.5314600830078124, 0.5311160278320313, 0.53119384765625, 0.5310310668945313, 0.5316546630859375, 0.531409912109375, 0.531567626953125, 0.5313894653320312, 0.5315020751953125, 0.531135498046875, 0.5310525512695312, 0.5308528442382813, 0.5313935546875, 0.5313812255859375, 0.5312890625, 0.530966552734375, 0.531019775390625, 0.5310494995117188, 0.5313024291992188, 0.5313894653320312, 0.531294189453125, 0.5311016845703125, 0.5310525512695312, 0.5311856689453125, 0.531736572265625, 0.531641357421875, 0.5321881713867187, 0.5313126220703125, 0.5313751220703125, 0.5310965576171875, 0.531794921875, 0.5309716186523438, 0.5311610717773437, 0.5310167236328125, 0.5313290405273438, 0.5308538818359375, 0.5310453491210938, 0.5313873901367188, 0.5312819213867187, 0.5309849853515625, 0.531398681640625, 0.5310084838867187, 0.5321144409179688, 0.531578857421875, 0.5316700439453125, 1.10214453125, 0.530830322265625, 0.5315020751953125, 0.5308692626953125, 0.5310894165039063, 0.5316024169921875, 0.5316566772460938, 0.5311129760742187, 0.5313003540039063, 0.5309224853515625, 0.5312870483398437, 
0.5310771484375, 0.5316566772460938, 0.5308528442382813, 0.5312696533203125, 0.5311334228515625, 0.5315010375976562, 0.5314529418945313, 0.5316433715820312, 0.5310289916992188, 0.5314703369140625, 0.5313914794921875, 0.5314805908203125, 0.5312102661132813, 0.5313157348632812, 0.5312808837890625, 0.5316044921875, 0.5312051391601562, 0.5314078979492187, 0.5316720581054688, 0.5313822631835937, 0.5311006469726562, 0.53110888671875, 0.5308703002929688, 0.5316167602539063, 0.5314221801757812, 0.531800048828125, 0.5311160278320313, 0.5314561157226563, 0.531411865234375, 0.5317734375, 0.5317805786132812, 0.5310750732421875, 0.5310975952148438, 0.53131982421875, 0.53121435546875, 0.531240966796875, 0.530924560546875, 0.5315112915039063, 0.5309634399414063, 0.53119384765625, 0.5311242065429688, 0.5314119873046875, 0.5314744262695312, 0.5316505737304688, 0.5320745239257813, 0.531435546875, 0.53260595703125, 0.5313802490234375, 0.5314990234375, 0.531493896484375, 0.531061767578125, 0.5313065185546875, 1.1010294189453125, 0.5310013427734375, 0.53124609375, 0.5309849853515625, 0.5314959106445313, 0.5311447143554687, 0.531198974609375, 0.531135498046875, 0.5317611694335938, 0.5310453491210938, 0.5312368774414062, 0.5310105590820312, 0.5315205688476563, 0.5312000122070313, 0.531442626953125, 0.5309010009765625, 0.53104638671875, 0.5309081420898437, 0.5313760986328125, 0.5308211059570312, 0.5312214965820312, 0.5310167236328125, 0.5315963134765626, 0.531188720703125, 0.5312471313476562, 0.5308661499023437, 0.5318123779296875, 0.5314027709960938, 0.5315277099609375, 0.5311959228515625, 0.5315491943359375, 0.5311867065429687, 0.5311385498046876, 0.5308538818359375, 0.5313402709960937, 0.5308897094726562, 0.53110888671875, 0.5310873413085937, 0.532853759765625, 0.5313351440429688, 0.531751953125, 0.5309685668945312, 0.5313668823242188, 0.5309788208007813, 0.5314406127929687, 0.5319198608398438, 0.5315972900390625, 0.5316341552734375, 0.531399658203125, 0.5313290405273438, 0.5312604370117188, 0.531052490234375, 0.5311856689453125, 0.5311262817382812, 0.5311273193359375, 0.5309450073242188, 0.5313003540039063, 0.5310310668945313, 0.531162109375, 0.5316484985351563, 0.5313546142578125, 0.531051513671875, 0.5314641723632813, 1.10335595703125, 0.5312788696289062, 0.5317376098632812, 0.5317816162109374, 0.5310443725585937, 0.5310945434570312, 0.531515380859375, 0.5312010498046875, 0.5312890625, 0.5315604248046875, 0.5310709838867187, 0.5312757568359375, 0.5313177490234375, 0.5313668823242188, 0.5315430297851562, 0.5313382568359375, 0.531462158203125, 0.5310822143554688, 0.5318031616210938, 0.5317324829101563, 0.5316137084960938, 0.5309522094726562, 0.5314713745117188, 0.531346435546875, 0.5311826171875, 0.5308856201171875, 0.5316904907226563, 0.5310525512695312, 0.5313648681640625, 0.5309481201171875, 0.5315020751953125, 0.5309798583984375, 0.5310975952148438, 0.5311068115234375, 0.53123583984375, 0.5308887329101563, 0.5311610717773437, 0.5312593994140625, 0.5315113525390625, 0.5320181274414062, 0.53134130859375, 0.5311498413085938, 0.5313812255859375, 0.5318276977539063, 0.5313341674804688, 0.5315061645507813, 0.5313771362304688, 0.5312665405273438, 0.53157373046875, 0.5311734008789063, 0.5318348999023438, 0.5309931640625, 0.5319618530273438, 0.5310545654296875, 0.531431396484375, 0.5314805908203125, 0.5322670288085938, 0.5313526000976563, 0.5318143920898437, 0.5314058227539062, 0.5316392822265625, 0.5314242553710937, 0.5316956176757812, 1.1039549560546875, 0.5310596923828125, 0.531430419921875, 0.5309603881835937, 
0.5313013916015625, 0.5310545654296875, 0.5314334716796875, 0.5311129760742187, 0.5314866943359375, 0.5309327392578125, 0.5313535766601563, 0.5317621459960937, 0.5320017700195312, 0.5313474731445312, 0.5321390380859375, 0.5313034057617188, 0.5315491943359375, 0.5315614624023437, 0.5321195678710937, 0.5317386474609375, 0.531430419921875, 0.53125732421875, 0.5315266723632812, 0.5314437255859376, 0.5318656005859375, 0.5314652099609375, 0.5318246459960938, 0.5309419555664062, 0.5313320922851562, 0.5310873413085937, 0.5319567260742187, 0.5315020751953125, 0.5317171020507813, 0.5314866943359375, 0.531135498046875, 0.5316986694335938, 0.5315419921875, 0.531198974609375, 0.5315286865234375, 0.5312481079101562, 0.53157373046875, 0.5313710327148438, 0.531556396484375, 0.5311170043945312, 0.5320908813476563, 0.5312860107421875, 0.5311273193359375, 0.53108837890625, 0.5317069091796875, 0.5314140014648437, 0.5318666381835937, 0.5313013916015625, 0.5315419921875, 0.5312225341796875, 0.531567626953125, 0.5311692504882812, 0.53146728515625, 0.5312901000976562, 0.5316116333007812, 0.5311057739257813, 0.5314898071289063, 0.5310587158203125, 0.5314539794921875, 1.1042969970703125, 0.5312860107421875, 0.5315072021484375, 0.5316034545898437, 0.5322137451171876, 0.5317847290039063, 0.5316690063476562, 0.5311959228515625, 0.531631103515625, 0.5317171020507813, 0.531820556640625, 0.5314273071289063, 0.5316249389648438, 0.5310904541015625, 0.531863525390625, 0.5312501831054688, 0.53151025390625, 0.531252197265625, 0.5320970458984375, 0.5310699462890625, 0.5312184448242188, 0.5309183959960937, 0.5319987182617187, 0.531577880859375, 0.5319649047851562, 0.5313546142578125, 0.53127783203125, 0.5312000122070313, 0.5314058227539062, 0.5309552612304688, 0.5314765014648437, 0.5311477661132813, 0.5315399780273438, 0.5311395874023438, 0.5313935546875, 0.5317590942382813, 0.531430419921875, 0.5314949340820313, 0.53096240234375, 0.5309450073242188, 0.5312819213867187, 0.5316137084960938, 0.5311641845703124, 0.5311508178710938, 0.5317099609375, 0.531009521484375, 0.5310576782226563, 0.530977783203125, 0.5313966064453125, 0.531272705078125, 0.531178466796875, 0.53085693359375, 0.5311764526367188, 0.5310115966796874, 0.531294189453125, 0.5309757690429687, 0.5316322021484375, 0.5310668334960937, 0.531282958984375, 0.531072021484375, 0.5319813232421875, 0.5315072021484375, 0.5314345092773437, 1.1038515625, 0.5315747680664062, 0.5315563354492188, 0.5314283447265625, 0.5311314086914063, 0.5308887329101563, 0.5313218383789062, 0.5309593505859375, 0.5309398803710937, 0.5308262329101563, 0.5310136108398438, 0.5309296875, 0.5314590454101562, 0.5310699462890625, 0.5316587524414063, 0.5311928100585938, 0.5312696533203125, 0.5310955810546875, 0.5310802001953125, 0.5312481079101562, 0.5316751098632813, 0.5312921752929688, 0.5321625366210937, 0.531294189453125, 0.5315594482421875, 0.5310873413085937, 0.5322465209960937, 0.5310218505859375, 0.5312512817382813, 0.5308609619140625, 0.531178466796875, 0.5309429931640625, 0.5310105590820312, 0.5310392456054688, 0.5313372192382813, 0.5314283447265625, 0.5313710327148438, 0.5309235229492187, 0.5312706298828125, 0.5308815307617187, 0.5310955810546875, 0.5310003051757812, 0.5311764526367188, 0.5315164184570312, 0.53134130859375, 0.5313402709960937, 0.5311385498046876, 0.5311682739257813, 0.53187890625, 0.5310679321289062, 0.5314652099609375, 0.5309286499023438, 0.5317437744140625, 0.5314334716796875, 0.5315911865234375, 0.5309706420898438, 0.5311815795898438, 0.5312235717773437, 0.5312030639648437, 
0.5309204711914063, 0.5312102661132813, 0.5308764038085938, 0.5311703491210937]",tokens/s,1.853538952869569,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-125m,EleutherAI/gpt-neo-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,microsoft/phi-1_5,microsoft/phi-1_5,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1485.733888,2103.967744,0.0,1457.52064,1272.750592,s,10,1.3342720336914062,0.13342720336914063,0.0017667378558565091,0.1333674545288086,0.13440001983642577,0.13625353012084962,0.13773633834838866,"[0.13810704040527344, 0.13359715270996095, 0.13159478759765625, 0.13398812866210938, 0.13163987731933594, 0.132114013671875, 0.13270646667480468, 0.1334988098144531, 0.13378965759277345, 0.13323609924316407]",tokens/s,1918.6492224658912,kWh,1.5465521064155546e-06,8.474274160367454e-07,6.439467633532405e-06,8.833447155984705e-06,tokens/kWh,28980758.641495775,MB,1485.733888,2103.967744,0.0,1457.52064,1369.423872,s,10,77.59171484375001,7.759171484375001,0.021550685509445954,7.750958740234375,7.78698193359375,7.79351220703125,7.798736425781249,"[7.7519140625, 7.7428955078125, 7.78553076171875, 7.781552734375, 7.80004248046875, 7.76520556640625, 7.75000341796875, 7.7375029296875, 7.73547216796875, 7.74159521484375]",tokens/s,8.119423591406116,kWh,9.154879443379708e-05,5.017541917467626e-05,0.00037131786234986455,0.0005130420759583379,tokens/kWh,122796.94581057319,,s,629,78.66502353668211,0.12506363042397795,0.0158250884711649,0.12294553375244141,0.1240623077392578,0.1244600341796875,0.2556754669189453,"[0.12330188751220703, 0.12316671752929688, 0.12365106964111328, 0.12313600158691407, 0.12294246673583985, 0.12248063659667968, 0.12371660614013671, 0.12260147094726563, 0.12286566162109375, 0.12282879638671874, 0.12315750122070312, 0.12320972442626953, 0.12280012512207031, 0.12268236541748047, 0.122640380859375, 0.12257997131347656, 0.12299161529541015, 0.1229834213256836, 0.12259737396240235, 0.12290866851806641, 0.12273561859130859, 0.12333164978027343, 0.12384864044189453, 0.12324454498291015, 0.12292301177978515, 0.12274278259277344, 0.1227489242553711, 0.12272640228271485, 0.12298239898681641, 0.12477235412597656, 0.12357427215576172, 0.12362035369873046, 0.12306233978271484, 0.12299974060058594, 0.12285030364990235, 0.12300287628173828, 0.12316060638427734, 0.1231595230102539, 0.12301516723632812, 0.12314726257324218, 0.12401152038574219, 0.12320358276367188, 0.12276121520996094, 0.12288307189941407, 0.12321382141113281, 0.12264653015136719, 0.12301721954345703, 0.12259737396240235, 0.12273664093017578, 0.122893310546875, 0.12342066955566407, 0.1232015380859375, 0.12298239898681641, 0.12279090881347657, 0.1230448989868164, 0.12328444671630859, 0.12287181091308594, 0.12288511657714844, 0.12308684539794922, 0.1227540512084961, 0.12283084869384765, 0.12267212677001953, 0.25649664306640624, 0.1227540512084961, 0.12280220794677735, 0.12263113403320312, 0.12264345550537109, 0.12267417907714843, 0.1228400650024414, 0.12283596801757812, 0.12282470703125, 0.12279603576660156, 
0.12281446075439453, 0.12269875335693359, 0.12270694732666015, 0.12278681945800782, 0.12276121520996094, 0.12260044860839844, 0.12260659027099609, 0.12257791900634765, 0.12294041442871094, 0.1228779525756836, 0.12287999725341797, 0.12280831909179687, 0.12294451141357422, 0.12248985290527344, 0.12257484436035156, 0.1226055679321289, 0.12298137664794923, 0.12271308898925781, 0.12264447784423828, 0.12263935852050781, 0.12252365112304688, 0.12261990356445313, 0.12269977569580078, 0.12264345550537109, 0.12255129241943359, 0.12263833618164062, 0.12267212677001953, 0.12288102722167969, 0.12292915344238281, 0.12263935852050781, 0.12389888000488282, 0.12297216033935547, 0.12294553375244141, 0.12293427276611328, 0.12263526153564454, 0.12274585723876953, 0.12553011322021485, 0.12401561737060547, 0.12355072021484376, 0.12335718536376954, 0.12291584014892579, 0.12288819122314452, 0.12369510650634766, 0.12324147033691406, 0.12273667144775391, 0.12291478729248047, 0.12294758605957032, 0.12287385559082031, 0.12286669158935547, 0.12300697326660157, 0.12317391967773438, 0.12299874877929687, 0.1229271011352539, 0.25462066650390625, 0.12265164947509766, 0.1226270751953125, 0.12303667449951172, 0.12276227569580078, 0.12365821075439454, 0.12293939208984375, 0.12266496276855468, 0.12368895721435547, 0.12677529907226562, 0.12441395568847656, 0.12445696258544922, 0.12463513946533203, 0.12443033599853516, 0.12438224029541016, 0.12425520324707032, 0.12467916870117188, 0.12446310424804688, 0.12442931365966797, 0.12455731201171875, 0.12458188629150391, 0.12372480010986328, 0.12267417907714843, 0.12398899078369141, 0.12292813110351562, 0.12310527801513672, 0.12295891571044922, 0.12267308807373047, 0.12258099365234375, 0.12275609588623047, 0.12290764617919922, 0.12266086578369141, 0.12255948638916016, 0.12306329345703125, 0.12263116455078125, 0.12326604461669922, 0.12354764556884766, 0.12335718536376954, 0.12372691345214844, 0.12280210876464843, 0.12289638519287109, 0.12269055938720703, 0.12372787475585938, 0.124010498046875, 0.12365824127197265, 0.12331314849853516, 0.12313497924804688, 0.12537753295898438, 0.12376166534423828, 0.12374527740478515, 0.12372582244873047, 0.1231278076171875, 0.12385382080078125, 0.12404633331298828, 0.12378214263916015, 0.12360908508300782, 0.12295782470703125, 0.12494950103759765, 0.12373811340332032, 0.12298035430908204, 0.12348416137695313, 0.12417740631103516, 0.12380774688720703, 0.2588282775878906, 0.12419481658935547, 0.12356505584716797, 0.12310323333740235, 0.12305203247070312, 0.12337971496582031, 0.12367977905273438, 0.12359062194824219, 0.12260147094726563, 0.12353740692138672, 0.12354354858398438, 0.1232701416015625, 0.12446720123291016, 0.12432588958740234, 0.1239582748413086, 0.12362137603759765, 0.12360499572753907, 0.12407295989990234, 0.12389170837402344, 0.12347392272949219, 0.1235968017578125, 0.12413645172119141, 0.12480512237548828, 0.12312678527832031, 0.12333670043945312, 0.12412928009033203, 0.12416000366210937, 0.12297113800048828, 0.12327529907226563, 0.12358857727050782, 0.12392755126953126, 0.1234176025390625, 0.12362649536132812, 0.12375859069824219, 0.12381183624267578, 0.12392758178710937, 0.12367868804931641, 0.12318924713134766, 0.12382310485839844, 0.12388658905029297, 0.12348006439208985, 0.12391423797607422, 0.12419583892822265, 0.12329676818847657, 0.12479590606689453, 0.12373197174072266, 0.12389478302001954, 0.12393472290039062, 0.12317702484130859, 0.12292499542236328, 0.12298649597167968, 0.12279808044433593, 0.12290457916259766, 
0.1228584976196289, 0.12289842987060547, 0.1226424331665039, 0.12311244964599609, 0.12252780914306641, 0.12262598419189454, 0.12282470703125, 0.12261888122558594, 0.12280217742919922, 0.12272025299072266, 0.25568666076660157, 0.1239900131225586, 0.12271616363525391, 0.12272128295898438, 0.1237401580810547, 0.12348108673095703, 0.12270182037353515, 0.12281241607666016, 0.12259839630126954, 0.12345139312744141, 0.12484710693359374, 0.123040771484375, 0.12316671752929688, 0.12355583953857421, 0.12351078033447266, 0.12384358215332031, 0.12355174255371094, 0.12602265930175782, 0.12446208190917969, 0.12428800201416015, 0.12392345428466797, 0.12333055877685548, 0.12432588958740234, 0.12359986877441406, 0.12368793487548828, 0.12390502166748046, 0.12386921691894531, 0.1236797103881836, 0.12345037078857422, 0.12340735626220703, 0.12414361572265625, 0.12356301116943359, 0.12358767700195313, 0.12459613037109375, 0.12439347076416016, 0.12301107025146485, 0.12367359924316407, 0.12390092468261718, 0.12395724487304688, 0.12516966247558595, 0.12354662322998047, 0.12346470642089843, 0.123863037109375, 0.12409139251708984, 0.12322509002685547, 0.12475084686279297, 0.12426445007324219, 0.12396953582763671, 0.12330086517333984, 0.12360908508300782, 0.12444057464599609, 0.12403609466552734, 0.12382208251953125, 0.1240606689453125, 0.12347494506835938, 0.12344012451171875, 0.12362751770019531, 0.1256468505859375, 0.12401663970947266, 0.12414873504638672, 0.12401868438720703, 0.12380364990234374, 0.12371561431884766, 0.25564668273925784, 0.12369203186035156, 0.12362137603759765, 0.12392345428466797, 0.12388044738769531, 0.12370022583007813, 0.12369715118408203, 0.12311347198486328, 0.12364492797851563, 0.123863037109375, 0.12393574523925781, 0.12324249267578125, 0.12262403106689453, 0.12281238555908203, 0.12295372772216796, 0.12290764617919922, 0.12269363403320313, 0.12262297821044922, 0.1231677474975586, 0.12281446075439453, 0.12263526153564454, 0.122714111328125, 0.12280524444580078, 0.12276838684082031, 0.1227110366821289, 0.12276735687255859, 0.12391014099121093, 0.12351795196533204, 0.12277555084228516, 0.12253388977050782, 0.12272025299072266, 0.12271622467041016, 0.12318611145019531, 0.12313190460205078, 0.12436070251464844, 0.12357734680175782, 0.12378726196289062, 0.12353024291992187, 0.12572364807128905, 0.12379545593261719, 0.12325888061523438, 0.12332441711425782, 0.12372991943359375, 0.12286566162109375, 0.1225134048461914, 0.12262400054931641, 0.12326297760009766, 0.12308582305908203, 0.12292403411865234, 0.1229148178100586, 0.12318924713134766, 0.12364492797851563, 0.12364291381835937, 0.12278781127929687, 0.12316159820556641, 0.12317593383789062, 0.12396851348876953, 0.12279193878173827, 0.12260761260986328, 0.12462694549560546, 0.12346470642089843, 0.12266598510742187, 0.1229148178100586, 0.2559385528564453, 0.12347289276123047, 0.12349951934814453, 0.12279500579833984, 0.12253695678710938, 0.1227325439453125, 0.12260454559326171, 0.12274380493164062, 0.12233113861083984, 0.12244992065429687, 0.12274790191650391, 0.12282572937011718, 0.12250214385986329, 0.12254208374023437, 0.12273458862304687, 0.12241919708251953, 0.12272844696044923, 0.1226250228881836, 0.12303974151611329, 0.1227663345336914, 0.12259839630126954, 0.12243353271484375, 0.12274380493164062, 0.12401561737060547, 0.12277452850341797, 0.1226844482421875, 0.12271612548828124, 0.12248268890380859, 0.12263935852050781, 0.12302540588378906, 0.12259327697753906, 0.1225697250366211, 0.12243456268310547, 0.12261273956298828, 
0.12260352325439453, 0.12275507354736329, 0.12261068725585937, 0.12276428985595703, 0.12263942718505859, 0.12258604431152344, 0.12272230529785157, 0.12245708465576172, 0.12246527862548828, 0.12284928131103516, 0.12406886291503906, 0.12364492797851563, 0.12372889709472656, 0.12372480010986328, 0.1236131820678711, 0.12349132537841796, 0.12373094177246094, 0.12433715057373047, 0.12420198059082031, 0.12350566101074219, 0.12349030303955078, 0.12351795196533204, 0.1237022705078125, 0.12365004730224609, 0.12362035369873046, 0.12419891357421875, 0.12375347137451172, 0.12319641876220704, 0.12275917053222657, 0.2567250061035156, 0.1229854736328125, 0.12295680236816406, 0.1226270751953125, 0.12288614654541016, 0.12259225463867188, 0.12339711761474609, 0.1230387191772461, 0.12344627380371094, 0.12426547241210938, 0.12386918640136718, 0.12261785888671875, 0.12307660675048829, 0.1229639663696289, 0.12252467346191406, 0.12267314910888671, 0.12372172546386719, 0.12260659027099609, 0.12311961364746093, 0.1224427490234375, 0.12306841278076172, 0.12283699035644531, 0.12256665802001954, 0.12251955413818359, 0.12265580749511719, 0.12248262023925781, 0.12276019287109376, 0.12253388977050782, 0.12335411071777344, 0.12312371063232422, 0.12290866851806641, 0.12254515075683593, 0.12248780822753906, 0.122534912109375, 0.12261682891845703, 0.12288716888427734, 0.12256460571289063, 0.12276838684082031, 0.12297830200195313, 0.1227694091796875, 0.12250418853759766, 0.12257279968261718, 0.1226977310180664, 0.12254617309570312, 0.12250009918212891, 0.122787841796875, 0.122461181640625, 0.12268748474121094, 0.12251545715332031, 0.12266802978515624, 0.1225871353149414, 0.1227623062133789, 0.12271507263183594, 0.12253900909423829, 0.12249292755126953, 0.12263219451904298, 0.12244684600830077, 0.12256870269775391, 0.12278272247314453, 0.12261170959472656, 0.1223895034790039, 0.12428390502929687, 0.12278988647460938, 0.2565724182128906, 0.12282470703125, 0.12249190521240234, 0.12308480072021484, 0.12364595031738282, 0.1230540771484375, 0.12276640319824218, 0.12278163146972657, 0.12248473358154296, 0.12251136016845703, 0.12247654724121093, 0.12248678588867187, 0.12250009918212891, 0.12251955413818359, 0.12259839630126954, 0.12254617309570312, 0.12253593444824219, 0.1236684799194336, 0.12317388916015624, 0.12310221099853516, 0.12281139373779297, 0.12291379547119141, 0.12274483489990234, 0.12282572937011718, 0.12260765075683594, 0.12258812713623046, 0.12272946929931641, 0.12461772918701172, 0.12312989044189453, 0.12266185760498047, 0.12315033721923828, 0.12284620666503906, 0.1230540771484375, 0.122640380859375, 0.12250624084472657, 0.12265676879882813, 0.12255538940429687, 0.12278272247314453, 0.12277555084228516, 0.12254003143310546, 0.12262400054931641, 0.12318208312988281, 0.12256050872802735, 0.12256153869628907, 0.1224816665649414, 0.12287999725341797, 0.12276326751708984, 0.12258406066894531, 0.12270182037353515, 0.12269465637207032, 0.12290150451660156, 0.12344525146484375, 0.12252265930175782, 0.12313801574707031, 0.12258406066894531, 0.12265471649169922, 0.12261888122558594, 0.12246323394775391, 0.12254924774169922, 0.12281753540039063, 0.12254617309570312, 0.12263526153564454, 0.12243353271484375, 0.2562088928222656, 0.12256050872802735, 0.12245913696289062, 0.12246221160888672, 0.122461181640625, 0.12328652954101563, 0.12252774047851563, 0.12238028717041016, 0.12253183746337891, 0.12281037139892578, 0.12281651306152344, 0.12248678588867187, 0.12256358337402344, 0.1226792984008789, 0.12369407653808594, 
0.12281958770751954, 0.12281549072265625, 0.12272742462158204, 0.12273356628417968, 0.12324864196777344, 0.12265676879882813, 0.12252365112304688, 0.12428800201416015, 0.12306227111816406, 0.12254208374023437, 0.12252979278564453, 0.12400844573974609, 0.12299775695800781, 0.12261376190185547, 0.12311654663085937, 0.12299980926513672, 0.12294143676757813, 0.12260044860839844, 0.12261682891845703, 0.12266700744628906, 0.1231677474975586, 0.12294656372070313, 0.12281651306152344, 0.12273766326904297, 0.12263731384277343, 0.12283084869384765, 0.12253900909423829, 0.12256768035888672, 0.12280937957763671, 0.12265058898925782, 0.12437811279296875, 0.12318822479248047, 0.12397875213623047, 0.12310733032226563, 0.12303462219238281, 0.12267314910888671, 0.12365312194824218, 0.12290662384033203, 0.12257689666748046, 0.12280012512207031, 0.12264447784423828, 0.12267110443115234, 0.1226455078125, 0.12263731384277343, 0.12309503936767578, 0.12316057586669922, 0.12300800323486329, 0.12256265258789062]",tokens/s,7.995929724812099,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-14B,Qwen/Qwen-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file 
requires the following packages that were not found in your environment: transformers_stream_generator. Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-65b,huggyllama/llama-65b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 86.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-7b,google/gemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc349-1c29ba267526d7bc3c51ec37;3d42c04d-b592-4b97-92be-2a4081aa1909) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/gemma-7b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-7b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,i,i,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/i/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a2f2d-730b71d70f6c98953eba9479;4b05c784-ed06-49fb-b55d-dd732e4680cf) Repository Not Found for url: https://huggingface.co/i/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: i is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-6.7b,facebook/opt-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in 
run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-34B,,cuda,0,42,,,,,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,main,False,False,MB,1721.061376,22129.672192,0.0,21483.225088,20799.036928,s,10,27.921223388671876,2.7921223388671876,0.002873024218980136,2.791891357421875,2.7956688476562497,2.7963155029296876,2.7968328271484375,"[2.793120361328125, 2.796962158203125, 2.790662353515625, 2.790180419921875, 2.7901220703125, 2.787002685546875, 2.79004345703125, 2.793321533203125, 2.795525146484375, 2.794283203125]",tokens/s,91.68652692484228,kWh,3.2928969429598913e-05,1.8046407099463975e-05,0.00015823184880759956,0.00020920722533666246,tokens/kWh,1223667.1060860218,MB,1726.52544,22129.672192,0.0,21483.225088,20902.142976,s,10,1662.0463125,166.20463124999998,0.013890741352160448,166.199171875,166.2239140625,166.22626953125,166.22815390625,"[166.211921875, 166.223390625, 166.191390625, 166.191359375, 166.191640625, 166.203515625, 166.194828125, 166.2176875, 166.191953125, 166.228625]",tokens/s,0.3790508093919315,kWh,0.0019620591764814327,0.001075380349219922,0.009375421333664404,0.012412860859365755,tokens/kWh,5075.381148131151,,s,629,1684.5351452636726,2.678116288177539,0.33164961442268687,2.637888427734375,2.63990244140625,2.640667578125,5.42840712890625,"[2.638138427734375, 2.638636962890625, 
2.638865478515625, 2.6390283203125, 2.6389248046875, 2.63697509765625, 2.6374482421875, 2.637588623046875, 2.6383994140625, 2.637781982421875, 2.63811181640625, 2.639296630859375, 2.6380380859375, 2.637068359375, 2.638099365234375, 2.637255615234375, 2.637476806640625, 2.63743798828125, 2.637467529296875, 2.63781787109375, 2.637115478515625, 2.637315185546875, 2.63943896484375, 2.639993896484375, 2.6398291015625, 2.63933349609375, 2.63897802734375, 2.639859619140625, 2.63980126953125, 2.639171630859375, 2.637111328125, 2.63720556640625, 2.636434326171875, 2.63687158203125, 2.63707958984375, 2.639266845703125, 2.63845263671875, 2.637464599609375, 2.638060546875, 2.64020263671875, 2.638950439453125, 2.63784033203125, 2.63769091796875, 2.6381884765625, 2.639057861328125, 2.637621337890625, 2.638141357421875, 2.63823876953125, 2.639244384765625, 2.63790283203125, 2.6387138671875, 2.6390537109375, 2.638361572265625, 2.637244384765625, 2.639206298828125, 2.638740478515625, 2.639088623046875, 2.638509033203125, 2.637013916015625, 2.637980712890625, 2.637846435546875, 2.638864501953125, 5.43535302734375, 2.63853662109375, 2.63802978515625, 2.63697607421875, 2.637854736328125, 2.638664794921875, 2.63739794921875, 2.637233154296875, 2.637539306640625, 2.637358154296875, 2.637158447265625, 2.637360107421875, 2.63921875, 2.63846923828125, 2.638044189453125, 2.639015869140625, 2.636918701171875, 2.637613037109375, 2.637201416015625, 2.63785888671875, 2.6367763671875, 2.637592529296875, 2.63693310546875, 2.6386318359375, 2.63771337890625, 2.638298095703125, 2.637001708984375, 2.6378701171875, 2.6421064453125, 2.6414111328125, 2.641197021484375, 2.64100146484375, 2.63889111328125, 2.6406533203125, 2.6406552734375, 2.637020263671875, 2.637322265625, 2.63722900390625, 2.63769091796875, 2.6369853515625, 2.63765185546875, 2.63745751953125, 2.638650390625, 2.638825439453125, 2.637306884765625, 2.64330859375, 2.641152099609375, 2.6409931640625, 2.63775244140625, 2.639099853515625, 2.638685302734375, 2.6388388671875, 2.637993896484375, 2.639129638671875, 2.638023681640625, 2.638602294921875, 2.63790283203125, 2.63815380859375, 2.637550537109375, 2.63849169921875, 2.639201171875, 2.6400400390625, 2.638790771484375, 5.4299453125, 2.63796826171875, 2.638594970703125, 2.637592529296875, 2.6385439453125, 2.637388916015625, 2.638464111328125, 2.637737060546875, 2.63819775390625, 2.63681005859375, 2.637154296875, 2.63805224609375, 2.638739501953125, 2.63828173828125, 2.638024658203125, 2.637736083984375, 2.637816650390625, 2.63781884765625, 2.63653466796875, 2.637263916015625, 2.637474853515625, 2.63747998046875, 2.636569580078125, 2.64140478515625, 2.637125732421875, 2.638499755859375, 2.6369833984375, 2.637737060546875, 2.636338134765625, 2.63744921875, 2.637201416015625, 2.638740478515625, 2.638299072265625, 2.63929150390625, 2.637087646484375, 2.637667236328125, 2.6377021484375, 2.636683349609375, 2.637559814453125, 2.637581298828125, 2.6383564453125, 2.636505126953125, 2.63706005859375, 2.636032958984375, 2.6379765625, 2.637094970703125, 2.63819775390625, 2.64007568359375, 2.6376611328125, 2.6375966796875, 2.6389228515625, 2.63778515625, 2.639754150390625, 2.6381884765625, 2.63853466796875, 2.63933642578125, 2.639509521484375, 2.639793212890625, 2.640154541015625, 2.63959033203125, 2.636541015625, 2.6371533203125, 2.63670068359375, 5.43085791015625, 2.64038818359375, 2.640405517578125, 2.63921875, 2.638604248046875, 2.638392333984375, 2.63703857421875, 2.638612548828125, 2.638138427734375, 2.637631591796875, 
2.637518798828125, 2.636412841796875, 2.639814697265625, 2.63709912109375, 2.638194580078125, 2.636851318359375, 2.637906982421875, 2.636904541015625, 2.63757421875, 2.637557861328125, 2.637801513671875, 2.637253662109375, 2.638193603515625, 2.63731396484375, 2.63754345703125, 2.637656005859375, 2.638063720703125, 2.6375537109375, 2.63655517578125, 2.636990478515625, 2.6373427734375, 2.638109619140625, 2.637427734375, 2.637330322265625, 2.63708154296875, 2.63813232421875, 2.636632080078125, 2.638185546875, 2.636789794921875, 2.6382294921875, 2.636559326171875, 2.63756494140625, 2.6364814453125, 2.637990966796875, 2.64270947265625, 2.63874755859375, 2.640064453125, 2.638127197265625, 2.637509521484375, 2.637388916015625, 2.637737060546875, 2.63737353515625, 2.639585205078125, 2.637530029296875, 2.638297119140625, 2.63809423828125, 2.637974609375, 2.63773583984375, 2.637834228515625, 2.63741552734375, 2.637762451171875, 2.637958251953125, 2.63785791015625, 5.428443359375, 2.63807275390625, 2.6370693359375, 2.638107666015625, 2.637665283203125, 2.63819677734375, 2.6379326171875, 2.63821923828125, 2.6370078125, 2.63802685546875, 2.638180419921875, 2.63954443359375, 2.6387373046875, 2.638023681640625, 2.636853271484375, 2.637129638671875, 2.637197265625, 2.636950439453125, 2.63758544921875, 2.638212158203125, 2.63828076171875, 2.6365849609375, 2.6370908203125, 2.638017578125, 2.63769384765625, 2.636833740234375, 2.638193603515625, 2.638341064453125, 2.640734130859375, 2.63990576171875, 2.639550537109375, 2.638340087890625, 2.63885107421875, 2.63923095703125, 2.6383955078125, 2.636854248046875, 2.636768310546875, 2.6369443359375, 2.637570068359375, 2.637836181640625, 2.63730078125, 2.63779541015625, 2.63750244140625, 2.637592529296875, 2.6366474609375, 2.638162841796875, 2.636866455078125, 2.6375146484375, 2.636231689453125, 2.6370693359375, 2.637305908203125, 2.63849169921875, 2.636894287109375, 2.636900390625, 2.637894775390625, 2.637490234375, 2.6377666015625, 2.64785107421875, 2.6376591796875, 2.637158447265625, 2.637832275390625, 2.63720849609375, 2.639467529296875, 5.425724609375, 2.637199462890625, 2.63809130859375, 2.638032958984375, 2.637854736328125, 2.638718017578125, 2.63857861328125, 2.637035400390625, 2.63773583984375, 2.638855224609375, 2.63739892578125, 2.638341064453125, 2.63885205078125, 2.639034423828125, 2.63807177734375, 2.637761474609375, 2.639326171875, 2.639801513671875, 2.639014892578125, 2.639296630859375, 2.637675537109375, 2.63769287109375, 2.637137939453125, 2.637119384765625, 2.6383955078125, 2.63736328125, 2.637094970703125, 2.637177734375, 2.6371953125, 2.637066162109375, 2.637592529296875, 2.639177734375, 2.637533203125, 2.6364169921875, 2.636937255859375, 2.6390283203125, 2.63927294921875, 2.63655126953125, 2.64357373046875, 2.637197265625, 2.637576171875, 2.638668701171875, 2.639214599609375, 2.639289306640625, 2.639532958984375, 2.638011474609375, 2.63690234375, 2.63817626953125, 2.636898193359375, 2.637287353515625, 2.637656982421875, 2.637981689453125, 2.636938232421875, 2.63751171875, 2.6373232421875, 2.63774609375, 2.637768798828125, 2.63883056640625, 2.639134765625, 2.641203125, 2.639278076171875, 2.638201904296875, 2.637488037109375, 5.42831396484375, 2.637304931640625, 2.638138427734375, 2.636865478515625, 2.639958984375, 2.63714404296875, 2.638241943359375, 2.63680810546875, 2.638630859375, 2.638310302734375, 2.640628662109375, 2.639406982421875, 2.63910302734375, 2.6390712890625, 2.640678955078125, 2.640310302734375, 2.63718701171875, 
2.637646728515625, 2.637350830078125, 2.638884765625, 2.6375947265625, 2.637400146484375, 2.6368818359375, 2.642212890625, 2.637052978515625, 2.637252685546875, 2.636802001953125, 2.64029296875, 2.6377216796875, 2.6375556640625, 2.6366923828125, 2.63757421875, 2.638635009765625, 2.637696044921875, 2.637308837890625, 2.637477783203125, 2.637402099609375, 2.6365869140625, 2.637488037109375, 2.638253173828125, 2.6386328125, 2.637402099609375, 2.63834716796875, 2.6377451171875, 2.637392822265625, 2.636634033203125, 2.637593505859375, 2.637182861328125, 2.637476806640625, 2.6378896484375, 2.637464599609375, 2.639731689453125, 2.637717529296875, 2.63756591796875, 2.638674072265625, 2.63798974609375, 2.638200927734375, 2.63733251953125, 2.63752197265625, 2.63793359375, 2.636739501953125, 2.63832470703125, 2.637581298828125, 5.43222705078125, 2.638201904296875, 2.63801025390625, 2.639353759765625, 2.63851513671875, 2.637308837890625, 2.637641845703125, 2.6370263671875, 2.636823486328125, 2.638310302734375, 2.637739990234375, 2.636812255859375, 2.637005859375, 2.63739794921875, 2.637341796875, 2.636630126953125, 2.64057861328125, 2.638487548828125, 2.6377880859375, 2.63784033203125, 2.637433837890625, 2.638781494140625, 2.638268310546875, 2.63773486328125, 2.63734375, 2.64085302734375, 2.637772705078125, 2.63678369140625, 2.636781494140625, 2.637518798828125, 2.637106201171875, 2.636708984375, 2.63695166015625, 2.63906103515625, 2.6390087890625, 2.639414306640625, 2.63884912109375, 2.638530517578125, 2.639602783203125, 2.639301513671875, 2.638671875, 2.6394306640625, 2.640407470703125, 2.63750244140625, 2.640359375, 2.640257080078125, 2.641056884765625, 2.639901611328125, 2.64020263671875, 2.640825439453125, 2.637892578125, 2.63866357421875, 2.63870166015625, 2.638103515625, 2.641052734375, 2.639697998046875, 2.639138916015625, 2.63690234375, 2.63733447265625, 2.637642822265625, 2.63709912109375, 2.6370068359375, 2.638327880859375, 5.4337607421875, 2.63835546875, 2.638138427734375, 2.637888427734375, 2.63785986328125, 2.63816796875, 2.637724609375, 2.63817431640625, 2.63734375, 2.637358154296875, 2.637413330078125, 2.63850390625, 2.63788134765625, 2.638455810546875, 2.63848046875, 2.637498291015625, 2.637077392578125, 2.640005126953125, 2.638171142578125, 2.63721875, 2.638088134765625, 2.637340576171875, 2.637073486328125, 2.637824951171875, 2.63834619140625, 2.63819580078125, 2.63836474609375, 2.63822021484375, 2.637216796875, 2.63757421875, 2.63648046875, 2.6365439453125, 2.63712255859375, 2.63768994140625, 2.636919921875, 2.637600830078125, 2.637790283203125, 2.63785986328125, 2.636936279296875, 2.637821044921875, 2.637937744140625, 2.638752685546875, 2.63861767578125, 2.637978515625, 2.63923291015625, 2.639403076171875, 2.63928515625, 2.63895751953125, 2.63758447265625, 2.637350830078125, 2.63724853515625, 2.6378095703125, 2.638350341796875, 2.637401123046875, 2.636908447265625, 2.63790478515625, 2.638477294921875, 2.638087158203125, 2.639475830078125, 2.639469482421875, 2.640037841796875, 2.637907958984375, 2.636857421875, 5.4322666015625, 2.640575439453125, 2.641816650390625, 2.640384033203125, 2.639365234375, 2.639097900390625, 2.6402724609375, 2.638215087890625, 2.638160888671875, 2.637603759765625, 2.637083740234375, 2.6377421875, 2.637223876953125, 2.6389013671875, 2.638928955078125, 2.637473876953125, 2.637214599609375, 2.636859375, 2.637595703125, 2.63667822265625, 2.6370498046875, 2.638342041015625, 2.63901904296875, 2.637927490234375, 2.63882958984375, 2.640554931640625, 
2.64306884765625, 2.639297607421875, 2.6393896484375, 2.64045166015625, 2.63986376953125, 2.639677490234375, 2.64067578125, 2.64060009765625, 2.640331787109375, 2.640183349609375, 2.639626220703125, 2.640194580078125, 2.63760498046875, 2.639280029296875, 2.6397255859375, 2.641039306640625, 2.6397880859375, 2.6377646484375, 2.639326171875, 2.63863916015625, 2.636462158203125, 2.637134765625, 2.6374892578125, 2.637487060546875, 2.63678466796875, 2.63695361328125, 2.636927978515625, 2.636856201171875, 2.63660546875, 2.637761474609375, 2.637675537109375, 2.6367119140625, 2.637622314453125, 2.6384404296875, 2.637370361328125, 2.63602880859375, 2.636900390625]",tokens/s,0.37339678057090686,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-160m,EleutherAI/pythia-160m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. 
If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,M,M,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/M/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) 
File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2ed7-32bed09473c19a255f33cf0c;2867b67d-be29-4b28-ab6d-9da6caa46751) Repository Not Found for url: https://huggingface.co/M/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: M is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,openai-community/gpt2-large,openai-community/gpt2-large,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise 
ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPT2LMHeadModel does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-70m,EleutherAI/pythia-70m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm-20b,internlm/internlm-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3626, in from_pretrained model = cls(config, *model_args, **model_kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 906, in __init__ self.model = InternLMModel(config) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 729, in __init__ self.layers = nn.ModuleList([InternLMDecoderLayer(config) for _ in range(config.num_hidden_layers)]) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 729, in self.layers = nn.ModuleList([InternLMDecoderLayer(config) for _ in range(config.num_hidden_layers)]) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 545, in __init__ self.self_attn = INTERNLM_ATTENTION_CLASSES[config.attn_implementation](config=config) KeyError: 'sdpa' ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Meta-Llama-3-8B,meta-llama/Meta-Llama-3-8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1618.886656,7600.603136,0.0,6954.156032,6314.17344,s,10,6.225003845214843,0.6225003845214843,0.0013149278084200315,0.6224861450195311,0.623799609375,0.6241176086425781,0.6243720080566406,"[0.6244356079101563, 0.620072998046875, 0.6223452758789062, 0.620931640625, 0.6226270141601562, 0.6217324829101563, 0.6218421630859375, 0.6236920776367187, 0.6237289428710937, 0.6235956420898437]",tokens/s,411.2447258916749,kWh,7.3330029845237736e-06,4.018192916897961e-06,3.395321343706048e-05,4.530440933848221e-05,tokens/kWh,5650664.112787582,MB,1618.886656,7600.603136,0.0,6954.156032,6464.046592,s,10,368.56716015625,36.856716015625,0.026041797027344372,36.850578125,36.898916796875,36.9004232421875,36.9016283984375,"[36.86497265625, 36.8430390625, 36.9019296875, 36.8581171875, 36.89858203125, 36.87110546875, 36.828984375, 36.82765625, 36.830046875, 36.8427265625]",tokens/s,1.7093221211920195,kWh,0.0004354530184467634,0.00023866611181760483,0.001976968700854742,0.00265108783111911,tokens/kWh,23763.829798655013,,s,629,373.5824957885741,0.5939308359118828,0.07397669449852429,0.58480126953125,0.5863296997070312,0.586693212890625,1.20664294921875,"[0.5845360717773438, 0.585296875, 0.5849774169921875, 0.5846087646484375, 0.5850286254882813, 0.584748046875, 0.584322021484375, 0.5847828369140625, 0.584279052734375, 0.58402099609375, 0.584068115234375, 0.5846630249023438, 0.584384521484375, 0.5846179809570312, 0.584816650390625, 0.584005615234375, 0.58421142578125, 0.5841326293945313, 0.584958984375, 0.5861488647460937, 0.584427490234375, 0.5843035888671875, 0.5846282348632813, 0.5846927490234375, 0.5846661376953125, 0.5845155639648437, 0.5843814697265625, 0.5843046264648437, 0.5848248291015625, 0.5849682006835938, 0.5866096801757813, 0.5847296142578124, 0.5858385620117188, 0.5855538940429688, 0.58547607421875, 0.5869332275390625, 0.5866168212890625, 0.584468505859375, 0.5869219970703125, 0.5864007568359375, 0.5853429565429688, 0.5859860229492188, 0.5848975219726562, 0.5843538208007812, 0.584753173828125, 0.5861519165039063, 0.585596923828125, 0.5861099243164063, 0.5845759887695312, 0.5846599731445312, 0.5848914184570313, 0.584426513671875, 0.5851033325195313, 0.5860843505859376, 0.5848597412109375, 0.5849896240234375, 0.5867396850585938, 0.586756103515625, 0.5865123901367187, 0.586208251953125, 0.5867110595703126, 0.585744384765625, 1.206224853515625, 0.5858980102539062, 0.5855364990234375, 0.5844613037109375, 0.584605712890625, 0.5844961547851563, 0.5850460205078125, 0.58532861328125, 0.5840045776367188, 0.5837639770507812, 0.5838192749023438, 0.5844100952148438, 0.585660400390625, 0.5845892944335938, 0.5847992553710938, 0.5843865356445312, 0.585439208984375, 0.5858754272460938, 0.5853839111328125, 0.5851627807617188, 0.5849927978515626, 0.5848330078125, 0.58598193359375, 0.58480126953125, 0.5861355590820313, 0.5848627319335937, 
0.5850623779296875, 0.5845616455078125, 0.584658935546875, 0.5841868896484375, 0.5848627319335937, 0.5841438598632812, 0.584394775390625, 0.5841991577148438, 0.5862697143554687, 0.5853613891601562, 0.584890380859375, 0.584838134765625, 0.5842606201171875, 0.5844306030273437, 0.5846517944335937, 0.5859328002929688, 0.5848330078125, 0.5846046752929688, 0.58459033203125, 0.5843906860351562, 0.5847449340820312, 0.5849129028320312, 0.58431591796875, 0.5842001953125, 0.58471728515625, 0.5849159545898438, 0.5853143310546876, 0.584321044921875, 0.5845770263671874, 0.58439990234375, 0.584537109375, 0.5842237548828125, 0.5845657348632812, 0.5844357299804688, 0.5841551513671875, 0.5848032836914062, 0.584511474609375, 1.20827490234375, 0.5846036376953125, 0.5843558349609375, 0.5847777099609375, 0.585406494140625, 0.5854955444335938, 0.5848872680664062, 0.58439990234375, 0.5842575073242188, 0.5842974853515625, 0.5841448974609375, 0.5848606567382812, 0.5855733642578125, 0.5853153076171875, 0.5855313720703125, 0.5857454223632812, 0.58644482421875, 0.586587158203125, 0.5863075561523438, 0.5864365844726562, 0.586102783203125, 0.5866045532226563, 0.5849364624023438, 0.587236328125, 0.586629150390625, 0.5864550170898437, 0.5869383544921875, 0.5851351318359375, 0.5868165283203125, 0.5862113037109375, 0.586013671875, 0.5854505004882813, 0.5856593627929687, 0.5856215209960938, 0.5870980834960937, 0.5859666137695313, 0.5857300415039063, 0.585511962890625, 0.5861007080078126, 0.586576904296875, 0.5868328857421875, 0.5858846435546875, 0.5864171752929688, 0.585228271484375, 0.58509521484375, 0.5863771362304687, 0.5851309814453125, 0.5850357666015625, 0.5855375366210938, 0.5851135864257813, 0.585407470703125, 0.5852764282226562, 0.5863362426757812, 0.5863567504882813, 0.5862123413085938, 0.5860802612304687, 0.5851658325195312, 0.5870366821289063, 0.5865308227539062, 0.5864335327148438, 0.5844910278320312, 0.58482275390625, 0.5869608764648437, 1.205981201171875, 0.58442138671875, 0.5845964965820313, 0.58412646484375, 0.5846712036132813, 0.584859619140625, 0.584500244140625, 0.5844193115234375, 0.5846005859375, 0.584226806640625, 0.5847767333984375, 0.5847193603515625, 0.5840773315429687, 0.5841602783203125, 0.5838458862304687, 0.5849825439453125, 0.5842872314453125, 0.5852507934570312, 0.58406298828125, 0.5844735717773437, 0.5843538208007812, 0.5846784057617187, 0.5847019653320312, 0.5851156616210937, 0.5848043823242187, 0.5851064453125, 0.5846159057617187, 0.5852262573242187, 0.5848002319335938, 0.5843343505859375, 0.5843394775390625, 0.5842994995117188, 0.584975341796875, 0.5844992065429687, 0.58461083984375, 0.5840097045898438, 0.5850368041992188, 0.5848576049804688, 0.5844039916992188, 0.5848340454101563, 0.5848616943359375, 0.584585205078125, 0.585101318359375, 0.5855078125, 0.5868196411132812, 0.5852261962890625, 0.5863843994140625, 0.586692626953125, 0.5863259887695312, 0.5871114501953125, 0.5859533081054688, 0.5858959350585937, 0.585849853515625, 0.5871841430664062, 0.5864273681640625, 0.5860311279296875, 0.586377197265625, 0.5844725952148437, 0.5856563720703125, 0.5858579711914063, 0.5854105834960938, 0.5857003784179687, 0.5850337524414062, 1.2078602294921874, 0.5852139282226563, 0.585723876953125, 0.586250244140625, 0.586271728515625, 0.5863413696289063, 0.586808349609375, 0.5861898193359375, 0.5845196533203125, 0.5865604858398438, 0.5842206420898437, 0.58428515625, 0.5850715942382813, 0.5856091918945312, 0.5868994750976563, 0.5851893920898438, 0.585296875, 0.5864417114257813, 0.5843292236328125, 
0.5858539428710937, 0.5862738037109375, 0.5860853881835938, 0.5861775512695313, 0.5855631103515625, 0.5856204833984375, 0.5868216552734375, 0.5872056274414063, 0.584875, 0.5847664794921875, 0.5854320678710937, 0.5869844360351563, 0.5866485595703125, 0.5866680297851562, 0.5851105346679687, 0.5844152221679687, 0.5853849487304688, 0.5857136840820313, 0.5857628173828126, 0.5850736694335937, 0.5865482177734375, 0.5847275390625, 0.5847992553710938, 0.5843517456054688, 0.586081298828125, 0.5863280639648437, 0.5844951171875, 0.5858427124023438, 0.5852047119140625, 0.585734130859375, 0.586134521484375, 0.5850726318359375, 0.585660400390625, 0.584953857421875, 0.5860597534179688, 0.586693603515625, 0.5868861694335937, 0.5861458129882813, 0.5863157958984375, 0.5857095947265625, 0.5846128540039063, 0.5858980102539062, 0.5863372802734375, 0.5851473999023438, 1.208574951171875, 0.5858948974609375, 0.5860034790039063, 0.5858488159179688, 0.5846507568359375, 0.5853051147460937, 0.5856901245117188, 0.585691162109375, 0.584543212890625, 0.5852897338867188, 0.5849733276367187, 0.5857167358398437, 0.5865441284179688, 0.585660400390625, 0.5851954956054688, 0.586119140625, 0.58567578125, 0.5863833618164063, 0.5861325073242187, 0.5857044677734375, 0.585565185546875, 0.5863946533203125, 0.585654296875, 0.5863167724609375, 0.5854157104492187, 0.5853399047851563, 0.5847070922851563, 0.5861939086914062, 0.58540234375, 0.5899376831054688, 0.5855396118164062, 0.5845524291992188, 0.5847060546875, 0.584268798828125, 0.5851054077148438, 0.5849722900390625, 0.5850357666015625, 0.5842964477539062, 0.5849876708984375, 0.5849682006835938, 0.5848822021484374, 0.585654296875, 0.584900634765625, 0.5849497680664062, 0.5853214721679687, 0.5848350830078125, 0.5849169921875, 0.5852866821289062, 0.58450537109375, 0.58450537109375, 0.5844418334960938, 0.5846466674804688, 0.584453125, 0.5847265014648437, 0.5847725830078125, 0.5843486938476562, 0.5844705200195313, 0.5843916625976563, 0.5847930908203125, 0.5845043334960938, 0.5840936889648437, 0.5843517456054688, 0.58418994140625, 1.2068055419921875, 0.5848176879882813, 0.5846128540039063, 0.5843087158203125, 0.5846333618164062, 0.5844971313476562, 0.5846210327148438, 0.58439990234375, 0.5845780639648438, 0.585080810546875, 0.5844469604492187, 0.58540234375, 0.5848923950195313, 0.5850890502929688, 0.5851924438476562, 0.5849088134765625, 0.5848033447265625, 0.5851453247070313, 0.5851412353515625, 0.5849180297851563, 0.5844786987304688, 0.5851146240234375, 0.5842401123046875, 0.5852006225585937, 0.5848944702148438, 0.5846077270507812, 0.5847623901367187, 0.5846937866210937, 0.5855672607421875, 0.5844490356445312, 0.584658935546875, 0.584933349609375, 0.5843896484375, 0.5846292724609375, 0.5843240966796875, 0.5845176391601562, 0.5844623413085938, 0.5843394775390625, 0.584595458984375, 0.58416845703125, 0.5841622924804688, 0.5841520385742187, 0.5843251342773438, 0.58414697265625, 0.5844735717773437, 0.5843404541015625, 0.5842247924804688, 0.5839093627929688, 0.5843087158203125, 0.5839298706054687, 0.5844920043945312, 0.5843517456054688, 0.5847521362304687, 0.5843599243164063, 0.5843937377929688, 0.584721435546875, 0.58444287109375, 0.5844132080078125, 0.5843599243164063, 0.58385205078125, 0.5841694946289062, 0.5840465698242188, 0.5851791381835938, 1.2095078125, 0.5847941284179687, 0.5849180297851563, 0.5843486938476562, 0.5844295654296875, 0.5849845581054688, 0.5841305541992188, 0.5846712036132813, 0.58494873046875, 0.584680419921875, 0.585617431640625, 0.5844520874023438, 
0.5843486938476562, 0.5840803833007813, 0.5843619995117187, 0.5847122192382812, 0.5845555419921875, 0.5846507568359375, 0.5844510498046875, 0.584573974609375, 0.5840926513671875, 0.5843978271484375, 0.584416259765625, 0.584521728515625, 0.5843097534179688, 0.5844971313476562, 0.5846302490234375, 0.5850009765625, 0.5847193603515625, 0.58450537109375, 0.5842810668945313, 0.5842012329101562, 0.5845913696289062, 0.5843363647460937, 0.5843660888671875, 0.584268798828125, 0.5849375, 0.5842759399414063, 0.5846517944335937, 0.5850183715820313, 0.58439990234375, 0.5842298583984376, 0.5847930908203125, 0.5849313354492187, 0.5852579956054688, 0.58459033203125, 0.5851351318359375, 0.5841787109375, 0.5843189697265625, 0.5841643676757813, 0.5844838256835938, 0.5843446044921875, 0.5842411499023438, 0.5844346923828125, 0.584469482421875, 0.5841571655273438, 0.5850153198242187, 0.5845196533203125, 0.5843230590820313, 0.5843446044921875, 0.5848402099609376, 0.584669189453125, 0.584326171875, 1.2087510986328125, 0.5850153198242187, 0.5850787963867188, 0.5847817993164063, 0.5849047241210937, 0.5845442504882813, 0.5847285766601562, 0.5845186767578125, 0.5842227172851563, 0.5838796997070312, 0.5849476928710937, 0.584469482421875, 0.584501220703125, 0.5847080688476562, 0.5847900390625, 0.5847060546875, 0.5841131591796875, 0.5844295654296875, 0.5843087158203125, 0.5849825439453125, 0.584447998046875, 0.5852078247070313, 0.5841234130859375, 0.5847500610351563, 0.5847091064453125, 0.584690673828125, 0.58425244140625, 0.5844613037109375, 0.584374267578125, 0.5843394775390625, 0.5849129028320312, 0.584479736328125, 0.5842227172851563, 0.5842257690429687, 0.5848576049804688, 0.5845330200195312, 0.5844653930664062, 0.58503271484375, 0.584975341796875, 0.5848627319335937, 0.5849794311523437, 0.5842933959960938, 0.58421044921875, 0.5843302612304687, 0.584958984375, 0.5844111328125, 0.58435888671875, 0.5848289184570312, 0.584627197265625, 0.5843753051757813, 0.5846619873046875, 0.584248291015625, 0.5846046752929688, 0.5841663818359375, 0.5857310791015625, 0.58428515625, 0.5845514526367187, 0.584437744140625, 0.5845524291992188, 0.5843087158203125, 0.584700927734375, 0.5854146728515625, 0.5844777221679688, 1.2091043701171875, 0.5852241821289063, 0.5847367553710937, 0.5846507568359375, 0.5843937377929688, 0.5844100952148438, 0.584079345703125, 0.5851975708007813, 0.5844152221679687, 0.5847306518554688, 0.5847654418945313, 0.5844561767578125, 0.584437744140625, 0.5841787109375, 0.5849343872070313, 0.5845084228515625, 0.5842759399414063, 0.58461181640625, 0.5846661376953125, 0.5853173828125, 0.5845933837890624, 0.5847725830078125, 0.5848923950195313, 0.5846435546875, 0.5854658813476562, 0.5842360229492187, 0.5851566162109375, 0.5853972778320312, 0.5851463623046875, 0.5845339965820312, 0.5844100952148438, 0.5846067504882813, 0.584648681640625, 0.5843169555664063, 0.5846702270507812, 0.5859276733398437, 0.5848790893554687, 0.5843026123046875, 0.584543212890625, 0.5850736694335937, 0.5842780151367187, 0.5850654907226562, 0.5847388305664063, 0.5847439575195312, 0.5851463623046875, 0.5845985107421875, 0.5845514526367187, 0.584332275390625, 0.58463232421875, 0.5850685424804688, 0.5847633666992188, 0.5853829345703125, 0.5849159545898438, 0.585486328125, 0.5849415893554688, 0.584775634765625, 0.5854443359375, 0.5850316772460937, 0.5853870239257812, 0.5852672119140625, 0.5847562255859375, 0.5845729370117188, 0.5851094970703125]",tokens/s,1.6836977296601632,,,,, 
4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-350m,facebook/opt-350m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-rw-1b,tiiuae/falcon-rw-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: FalconForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-125m,facebook/opt-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-4.5B,facebook/xglm-4.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: XGLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,x,x,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/x/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e 
huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a31fc-2127380443dbbf1d7fc3dc91;4cbbddad-8dc9-4dd0-9e53-0f8c66e3ddee) Repository Not Found for url: https://huggingface.co/x/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: x is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-3b-4e1t,stabilityai/stablelm-3b-4e1t,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.1,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2104.19712,2911.371264,0.0,2264.92416,2140.72832,s,10,2.4884028778076175,0.2488402877807617,0.0017336823043823078,0.2489057083129883,0.25072755279541015,0.2508263542175293,0.2509053953552246,"[0.2507055969238281, 0.25092515563964846, 0.24757785034179688, 0.24682144165039063, 0.24747581481933595, 0.24631741333007812, 0.24757225036621094, 0.25036245727539064, 0.2504113311767578, 
0.2502335662841797]",tokens/s,1028.7723193181093,kWh,2.9072395831265747e-06,1.5930372833037567e-06,1.325819488838971e-05,1.775847175482004e-05,tokens/kWh,14415654.879227767,MB,2104.19712,2911.371264,0.0,2264.92416,2246.907904,s,10,145.7403466796875,14.57403466796875,0.01212453290098591,14.5695126953125,14.590745703125,14.591192187499999,14.591549375,"[14.590646484375, 14.591638671875, 14.582625, 14.5875322265625, 14.570828125, 14.568197265625, 14.5662099609375, 14.56188671875, 14.5592158203125, 14.56156640625]",tokens/s,4.3227562878290176,kWh,0.00017210086691347241,9.432530317883612e-05,0.0007748077688972132,0.001041233938989522,tokens/kWh,60505.13495664491,,s,629,147.744148284912,0.23488735816361225,0.029563076186171044,0.23116493225097656,0.23200419921874998,0.2323472412109375,0.47893525024414063,"[0.23276133728027343, 0.23156428527832032, 0.23250534057617187, 0.23177215576171875, 0.23175167846679687, 0.2316636199951172, 0.23166464233398437, 0.23181925964355468, 0.2317864990234375, 0.2317178955078125, 0.23165542602539063, 0.23199948120117186, 0.2319114227294922, 0.23230770874023438, 0.2314403839111328, 0.23090074157714843, 0.2311679992675781, 0.23108505249023437, 0.23182847595214845, 0.23120793151855468, 0.2317506561279297, 0.23156838989257814, 0.23112498474121093, 0.23156224060058594, 0.23118540954589845, 0.23118438720703124, 0.230614013671875, 0.23102362060546874, 0.2310082550048828, 0.23108607482910157, 0.230908935546875, 0.23087001037597657, 0.23108709716796874, 0.2310952911376953, 0.23200051879882813, 0.2310277099609375, 0.23204249572753907, 0.2310840301513672, 0.230835205078125, 0.23095603942871093, 0.23102668762207032, 0.23113728332519531, 0.23130213928222657, 0.23161343383789063, 0.2317076416015625, 0.23303167724609375, 0.23183871459960936, 0.2320199737548828, 0.23191552734375, 0.23193702697753907, 0.23194009399414062, 0.23124581909179687, 0.23149977111816405, 0.23219200134277343, 0.2310451202392578, 0.23188479614257812, 0.23178752136230468, 0.23196159362792967, 0.23144857788085937, 0.23166770935058595, 0.23165029907226561, 0.23200358581542968, 0.48229580688476564, 0.2318663635253906, 0.23166464233398437, 0.23162367248535157, 0.23172402954101562, 0.23182745361328125, 0.23196876525878907, 0.2318561248779297, 0.23200358581542968, 0.23229849243164064, 0.23214796447753908, 0.2318919677734375, 0.23183360290527344, 0.23185714721679687, 0.23202919006347655, 0.231910400390625, 0.23255039978027345, 0.23172607421875, 0.23201484680175782, 0.23192678833007813, 0.23147724914550782, 0.23117721557617188, 0.23114137268066406, 0.2309345245361328, 0.2317998046875, 0.23216639709472656, 0.23188581848144532, 0.2317004852294922, 0.23109426879882813, 0.23139736938476563, 0.23195648193359375, 0.23148544311523436, 0.23106661987304689, 0.23104920959472655, 0.23114137268066406, 0.23106048583984376, 0.2310031433105469, 0.23155711364746093, 0.2323671112060547, 0.23183564758300781, 0.23187660217285155, 0.2323937225341797, 0.23180697631835936, 0.23087820434570314, 0.2311065673828125, 0.2314956817626953, 0.2341201934814453, 0.23121510314941407, 0.23109735107421875, 0.23107379150390625, 0.23113011169433595, 0.2308290557861328, 0.2309160919189453, 0.2308720703125, 0.23094578552246095, 0.2308229064941406, 0.2311014404296875, 0.23225958251953124, 0.23146803283691406, 0.232700927734375, 0.23182234191894532, 0.23120179748535155, 0.2311956481933594, 0.47941940307617187, 0.231984130859375, 0.23158067321777343, 0.23134104919433593, 0.23164210510253908, 0.23269786071777343, 0.2318868408203125, 0.23200665283203126, 
0.23196467590332032, 0.23198822021484375, 0.23132159423828125, 0.23144960021972658, 0.23234970092773438, 0.23229849243164064, 0.2326097869873047, 0.23209368896484375, 0.23207942199707032, 0.23165023803710938, 0.2314403839111328, 0.23190937805175782, 0.23209164428710938, 0.23215309143066407, 0.23194931030273438, 0.23258828735351564, 0.23188890075683594, 0.231699462890625, 0.23152946472167968, 0.23133287048339843, 0.23157452392578126, 0.23149261474609376, 0.23182847595214845, 0.23126322937011717, 0.23139430236816405, 0.23153561401367187, 0.23187046813964843, 0.23149977111816405, 0.23232000732421876, 0.2311393280029297, 0.2319052734375, 0.23080345153808593, 0.23089459228515624, 0.23061196899414063, 0.23088230895996092, 0.23082701110839843, 0.23086898803710937, 0.23073382568359374, 0.23112396240234376, 0.2319974365234375, 0.2311065673828125, 0.23078912353515624, 0.23073587036132812, 0.2308495330810547, 0.23100108337402345, 0.23077580261230468, 0.23094578552246095, 0.23075942993164061, 0.23081983947753906, 0.23078399658203125, 0.23087615966796876, 0.23072665405273438, 0.23087615966796876, 0.2308290557861328, 0.23087615966796876, 0.4795392150878906, 0.23177830505371094, 0.2310133819580078, 0.23141786193847655, 0.23111372375488282, 0.23092633056640624, 0.23101951599121093, 0.23092633056640624, 0.23117619323730468, 0.2310963134765625, 0.23151104736328126, 0.23107174682617188, 0.23176499938964842, 0.23153663635253907, 0.2313123779296875, 0.23081983947753906, 0.23273677062988282, 0.23198104858398438, 0.23182540893554687, 0.2313287658691406, 0.23094989013671874, 0.23091506958007812, 0.23098880004882813, 0.23076454162597657, 0.230761474609375, 0.23085772705078125, 0.23132159423828125, 0.23243980407714843, 0.23207936096191406, 0.23211520385742188, 0.23186534118652344, 0.2317998046875, 0.2313912353515625, 0.23109324645996093, 0.23126527404785155, 0.23119667053222656, 0.23178956604003906, 0.2312058868408203, 0.23166157531738282, 0.23158784484863282, 0.23162060546875, 0.23184281921386718, 0.23122227478027343, 0.2323435516357422, 0.2337822723388672, 0.23145062255859375, 0.2319605712890625, 0.2309754943847656, 0.23164108276367187, 0.23121817016601562, 0.23156736755371093, 0.2316451873779297, 0.2317619171142578, 0.23206605529785157, 0.2315130920410156, 0.23114854431152343, 0.2315274200439453, 0.2312376251220703, 0.23179263305664063, 0.23200358581542968, 0.23266099548339844, 0.23178752136230468, 0.2316636199951172, 0.4785776672363281, 0.23102053833007813, 0.2308526153564453, 0.23079936218261718, 0.230835205078125, 0.2308177947998047, 0.2319779815673828, 0.23094578552246095, 0.23088333129882813, 0.23176089477539064, 0.2309969940185547, 0.23143936157226563, 0.23160627746582033, 0.231478271484375, 0.2311690216064453, 0.2314639434814453, 0.23161958312988282, 0.23254835510253907, 0.23204966735839844, 0.2317864990234375, 0.23187455749511718, 0.23142707824707032, 0.232457275390625, 0.2311648712158203, 0.2321274871826172, 0.23180799865722657, 0.2312376251220703, 0.2311690216064453, 0.23100621032714844, 0.23096115112304688, 0.23099392700195312, 0.23096217346191406, 0.23088742065429688, 0.2309969940185547, 0.23093043518066406, 0.23100210571289062, 0.23089971923828126, 0.23066213989257814, 0.23169740295410157, 0.23114239501953124, 0.23260263061523437, 0.23084646606445314, 0.23156736755371093, 0.23112498474121093, 0.23072972106933592, 0.23080447387695313, 0.23094992065429687, 0.23107376098632812, 0.23089260864257813, 0.23205165100097655, 0.23145266723632812, 0.23087309265136718, 0.23119052124023437, 0.2307184600830078, 
0.2308362274169922, 0.23087513732910156, 0.23102566528320312, 0.2333665313720703, 0.23122125244140626, 0.2310635528564453, 0.230940673828125, 0.23090483093261718, 0.23118336486816407, 0.47746356201171875, 0.2322493438720703, 0.2315694122314453, 0.23091404724121095, 0.2314844207763672, 0.231510009765625, 0.23128985595703125, 0.2311628875732422, 0.231077880859375, 0.23118438720703124, 0.23118028259277343, 0.23097445678710937, 0.23111167907714844, 0.23096627807617187, 0.23094784545898436, 0.23088230895996092, 0.23102668762207032, 0.23121714782714844, 0.2314086456298828, 0.23167794799804686, 0.23117721557617188, 0.23124172973632812, 0.2311014404296875, 0.23113113403320312, 0.23106661987304689, 0.23094578552246095, 0.2310102996826172, 0.2310451202392578, 0.23154483032226564, 0.23112704467773437, 0.23111372375488282, 0.2310451202392578, 0.23104716491699218, 0.23118336486816407, 0.2313881530761719, 0.23125094604492188, 0.2312130584716797, 0.23101644897460938, 0.2333429718017578, 0.23087820434570314, 0.2312447967529297, 0.23098573303222655, 0.23105126953125, 0.2310522918701172, 0.23171685791015625, 0.2312447967529297, 0.23110041809082033, 0.23101849365234375, 0.2310399932861328, 0.231014404296875, 0.23103385925292969, 0.23098471069335938, 0.23129804992675781, 0.23095091247558594, 0.23119769287109376, 0.2312857666015625, 0.23109939575195312, 0.2308915252685547, 0.2313236541748047, 0.23161138916015625, 0.23103897094726564, 0.23182643127441407, 0.23234150695800782, 0.4790743103027344, 0.23154176330566406, 0.23141477966308593, 0.23161036682128905, 0.23167897033691406, 0.23150694274902345, 0.2312376251220703, 0.231583740234375, 0.2312970275878906, 0.2310840301513672, 0.23138304138183594, 0.2310635528564453, 0.23118336486816407, 0.23100006103515625, 0.23120793151855468, 0.23087615966796876, 0.2310133819580078, 0.230898681640625, 0.23100723266601564, 0.23108096313476562, 0.23136668395996093, 0.23115977478027344, 0.2310768585205078, 0.231046142578125, 0.23245619201660156, 0.23092428588867187, 0.2311414337158203, 0.2312303924560547, 0.2311260223388672, 0.2310102996826172, 0.2311925811767578, 0.23104920959472655, 0.23100933837890625, 0.2310194549560547, 0.23117926025390625, 0.2311075897216797, 0.23118540954589845, 0.23110348510742187, 0.23108096313476562, 0.23085977172851563, 0.23155711364746093, 0.23233740234375, 0.23105946350097656, 0.2312796173095703, 0.2310062713623047, 0.23090272521972657, 0.23148646545410156, 0.23091098022460937, 0.2309878387451172, 0.23087312316894532, 0.23152015686035157, 0.2321797180175781, 0.23130316162109374, 0.230898681640625, 0.23095295715332032, 0.23084236145019532, 0.2311014404296875, 0.2311331787109375, 0.23123052978515626, 0.2309252471923828, 0.23105740356445312, 0.23117619323730468, 0.23105740356445312, 0.48152984619140626, 0.2311157684326172, 0.23090380859375, 0.23155302429199218, 0.23127449035644532, 0.23103897094726564, 0.23097958374023436, 0.2311710662841797, 0.23115980529785157, 0.23104103088378905, 0.23108198547363282, 0.23097036743164062, 0.23084544372558594, 0.2313185272216797, 0.23110552978515625, 0.23093862915039062, 0.23098060607910156, 0.23100621032714844, 0.23143014526367187, 0.2317823944091797, 0.23226675415039064, 0.2317117462158203, 0.23127757263183593, 0.23153868103027345, 0.2311751708984375, 0.23130726623535156, 0.23114341735839844, 0.23096524047851563, 0.23087103271484374, 0.23130419921875, 0.2320271301269531, 0.23118643188476562, 0.23128370666503906, 0.2312806396484375, 0.23114854431152343, 0.2313799743652344, 0.23123968505859374, 0.23101542663574218, 
0.2314403839111328, 0.23084031677246095, 0.23116184997558595, 0.23074099731445313, 0.23112396240234376, 0.23095706176757813, 0.23086592102050782, 0.2307573699951172, 0.23168409729003905, 0.23131033325195313, 0.2311393280029297, 0.2310215606689453, 0.23085977172851563, 0.23086285400390624, 0.2309519348144531, 0.2308239288330078, 0.23091813659667967, 0.23077786254882812, 0.2309222412109375, 0.2310697021484375, 0.2309160919189453, 0.23086592102050782, 0.23115367126464845, 0.2308720703125, 0.23090789794921876, 0.48265728759765625, 0.23116595458984374, 0.23137178039550782, 0.2312478790283203, 0.23101849365234375, 0.23090687561035156, 0.23088333129882813, 0.23087309265136718, 0.23085466003417968, 0.23150079345703126, 0.231151611328125, 0.23109120178222656, 0.231077880859375, 0.23160421752929689, 0.23117926025390625, 0.23098675537109375, 0.23116493225097656, 0.2308157501220703, 0.231014404296875, 0.231046142578125, 0.2309580841064453, 0.23116697692871094, 0.23099903869628907, 0.23071437072753906, 0.23106150817871093, 0.23102566528320312, 0.23100422668457032, 0.23102662658691406, 0.23118028259277343, 0.23100518798828126, 0.23101951599121093, 0.2309969940185547, 0.23098880004882813, 0.23096115112304688, 0.23098162841796874, 0.23114035034179686, 0.23111065673828124, 0.23085157775878906, 0.23094886779785156, 0.23084031677246095, 0.23096832275390625, 0.2307747802734375, 0.23147724914550782, 0.23169024658203125, 0.23177731323242187, 0.23133180236816406, 0.23098675537109375, 0.2309550018310547, 0.23120077514648438, 0.23074815368652343, 0.23088742065429688, 0.23084031677246095, 0.23159091186523437, 0.23110963439941407, 0.2308495330810547, 0.23108096313476562, 0.23082188415527344, 0.23110041809082033, 0.23168614196777343, 0.23109939575195312, 0.23103077697753907, 0.2309160919189453, 0.23111378479003905, 0.481429443359375, 0.2311956481933594, 0.2310645751953125, 0.23094989013671874, 0.23127655029296876, 0.23099186706542968, 0.2309416961669922, 0.23101747131347655, 0.23098880004882813, 0.23092941284179688, 0.23106661987304689, 0.23098060607910156, 0.231046142578125, 0.23074508666992188, 0.23084442138671876, 0.2313000946044922, 0.23091404724121095, 0.23080551147460937, 0.23100416564941406, 0.23077171325683593, 0.23084031677246095, 0.23112908935546875, 0.23205580139160156, 0.231077880859375, 0.23106048583984376, 0.2309580841064453, 0.23238552856445313, 0.23106764221191406, 0.2311690216064453, 0.23111065673828124, 0.23110041809082033, 0.23188890075683594, 0.2314956817626953, 0.23113113403320312, 0.23112908935546875, 0.23114341735839844, 0.23109735107421875, 0.23145062255859375, 0.23099288940429688, 0.231088134765625, 0.2312130584716797, 0.2308526153564453, 0.2309345245361328, 0.23095706176757813, 0.23097138977050782, 0.23116082763671875, 0.23089765930175782, 0.23111167907714844, 0.23134310913085937, 0.23081062316894532, 0.23087411499023439, 0.2307010498046875, 0.2309171142578125, 0.23114547729492188, 0.23143423461914062, 0.2308792266845703, 0.23096934509277345, 0.23170559692382814, 0.2311894989013672, 0.2319667205810547, 0.23111680603027343, 0.23170559692382814, 0.23135232543945314]",tokens/s,4.257359816288809,,,,, 
4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-2-1_6b,stabilityai/stablelm-2-1_6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.1,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1448.177664,2449.997824,0.0,1803.55072,1664.521216,s,10,1.3644380798339844,0.13644380798339845,0.0014011159264243525,0.13627196502685546,0.13786112670898437,0.13869882049560547,0.13936897552490235,"[0.13953651428222658, 0.13767497253417968, 0.1349912567138672, 0.13507321166992187, 0.13563778686523437, 0.13482015991210938, 0.13582060241699218, 0.13672332763671874, 0.13708172607421876, 0.13707852172851562]",tokens/s,1876.2302502664566,kWh,1.5975721627921314e-06,8.753909460972716e-07,6.686964809027082e-06,9.159927917916485e-06,tokens/kWh,27947818.180891287,MB,1448.472576,2449.997824,0.0,1803.55072,1763.593728,s,10,81.3460283203125,8.13460283203125,0.011946610722273996,8.1315908203125,8.150488769531249,8.15235205078125,8.153842675781249,"[8.13529541015625, 8.1396748046875, 8.11939111328125, 8.1203115234375, 8.14693603515625, 8.12648193359375, 8.12576123046875, 8.15007470703125, 8.15421533203125, 8.12788623046875]",tokens/s,7.744692801955592,kWh,9.601664840011627e-05,5.262418258723866e-05,0.0003946831618423715,0.0005433239928297265,tokens/kWh,115952.91360479954,,s,629,82.4390829467774,0.13106372487563966,0.01624326295714781,0.1289318389892578,0.12986593322753906,0.13038284606933595,0.2649074096679688,"[0.13380709838867189, 0.1322936248779297, 0.13054360961914063, 0.1302783966064453, 0.12868301391601564, 0.12876185607910157, 0.12865126037597657, 0.12903424072265626, 0.12871475219726564, 0.12893490600585938, 0.1289205780029297, 0.12972032165527345, 0.1293711395263672, 0.12937216186523437, 0.1286614990234375, 0.12999168395996094, 0.12912229919433593, 0.1288099822998047, 0.12900044250488282, 0.12889395141601562, 0.12870246887207032, 0.12895744323730468, 0.12921856689453126, 0.128795654296875, 0.12864614868164062, 0.12892466735839844, 0.1287782440185547, 0.1288970184326172, 0.12859596252441408, 0.13040333557128905, 0.1292042236328125, 0.12866867065429688, 0.12910386657714842, 0.12879667663574218, 0.12910797119140624, 0.12875161743164062, 0.12865228271484375, 0.12910182189941405, 0.12855398559570314, 0.12869017028808594, 0.1285877685546875, 0.12864717102050782, 0.12905984497070314, 0.12876800537109376, 0.1286492156982422, 0.128606201171875, 0.12849868774414064, 0.12877619934082032, 0.12863282775878906, 0.1288140869140625, 0.12869119262695314, 0.12871987915039063, 0.1286246337890625, 0.12838092041015625, 0.12851097106933593, 0.12944383239746093, 0.1286277160644531, 0.1287710723876953, 0.12888677978515625, 0.12879052734375, 0.12854885864257812, 0.1299640350341797, 0.2665809936523437, 0.12871066284179689, 0.12894105529785158, 0.1295984649658203, 0.12898304748535155, 0.12851507568359374, 0.12878950500488281, 0.12854783630371094, 0.12850688171386718, 0.12838911437988282, 0.12960768127441405, 0.12967936706542968, 0.1298472900390625, 0.1296302032470703, 0.13025894165039062, 0.13039308166503907, 0.1289871368408203, 0.1294192657470703, 
0.1304954833984375, 0.12884991455078126, 0.12983807373046874, 0.1289390106201172, 0.12923085021972655, 0.1289871368408203, 0.12885401916503905, 0.12969369506835937, 0.12940800476074218, 0.12860415649414061, 0.12912844848632812, 0.12885093688964844, 0.12888882446289063, 0.1289707489013672, 0.12897279357910157, 0.12949708557128906, 0.1289758758544922, 0.12905471801757812, 0.12891751098632812, 0.1290301513671875, 0.1288406982421875, 0.1292779541015625, 0.12873216247558594, 0.1292216339111328, 0.12959539794921876, 0.12929945373535157, 0.12899020385742188, 0.1289697265625, 0.13126144409179688, 0.12883660888671875, 0.12922367858886719, 0.12980940246582032, 0.12895846557617188, 0.12943463134765626, 0.12903321838378906, 0.1292216339111328, 0.1292656707763672, 0.12897279357910157, 0.1300193328857422, 0.12942233276367188, 0.12876185607910157, 0.12872601318359375, 0.1286103057861328, 0.12886732482910157, 0.128753662109375, 0.2649610290527344, 0.1289195556640625, 0.12843621826171875, 0.12883558654785157, 0.12871475219726564, 0.12869529724121093, 0.12869325256347655, 0.12878950500488281, 0.1287209014892578, 0.12903117370605469, 0.12896153259277343, 0.1297407989501953, 0.12884378051757814, 0.12859085083007812, 0.12871270751953126, 0.1286604766845703, 0.12877926635742187, 0.12865126037597657, 0.12881817626953124, 0.12926361083984375, 0.12863999938964843, 0.1286297607421875, 0.1285550079345703, 0.12893798828125, 0.128932861328125, 0.12907008361816405, 0.12877516174316406, 0.12867584228515624, 0.12991897583007814, 0.1287045135498047, 0.12865843200683594, 0.12990669250488282, 0.1296609344482422, 0.12844236755371094, 0.12855091857910156, 0.12866867065429688, 0.12861337280273438, 0.1286871032714844, 0.12876185607910157, 0.12873420715332032, 0.1288775634765625, 0.12874342346191406, 0.1287720947265625, 0.12879257202148436, 0.1287915496826172, 0.1290198974609375, 0.12967730712890624, 0.129797119140625, 0.12876390075683594, 0.128795654296875, 0.12886834716796874, 0.128932861328125, 0.12865437316894532, 0.12863279724121093, 0.1286420440673828, 0.12882432556152343, 0.12872703552246093, 0.12905267333984374, 0.12885299682617188, 0.1287772216796875, 0.1287188415527344, 0.12861439514160156, 0.1287772216796875, 0.26476953125, 0.12873829650878907, 0.12883148193359376, 0.12851199340820313, 0.12869631958007813, 0.12847923278808593, 0.12853042602539064, 0.12893798828125, 0.12854988098144532, 0.12875263977050783, 0.128505859375, 0.12852224731445314, 0.12876800537109376, 0.12872909545898437, 0.12887347412109376, 0.12867584228515624, 0.12863282775878906, 0.1286860809326172, 0.1286614990234375, 0.12861952209472657, 0.12875570678710938, 0.12855398559570314, 0.12889599609375, 0.12875263977050783, 0.12984013366699218, 0.1295626220703125, 0.12891545104980467, 0.1287720947265625, 0.12863282775878906, 0.12864717102050782, 0.12877311706542968, 0.12869529724121093, 0.12856422424316405, 0.1285201873779297, 0.1286625213623047, 0.1287720947265625, 0.12851199340820313, 0.1287403564453125, 0.1289318389892578, 0.12899635314941407, 0.12849664306640626, 0.12876902770996093, 0.13012684631347657, 0.12892160034179687, 0.1313116149902344, 0.13377740478515626, 0.12898611450195313, 0.12884889221191406, 0.12867481994628907, 0.1287475128173828, 0.1288089599609375, 0.12871168518066406, 0.12876390075683594, 0.12973362731933594, 0.12869223022460938, 0.12912229919433593, 0.12858781433105468, 0.12859286499023437, 0.128395263671875, 0.12851199340820313, 0.12858982849121095, 0.1285191650390625, 0.12845773315429687, 0.26590206909179687, 0.13000090026855468, 
0.1293588409423828, 0.1295380554199219, 0.12994560241699218, 0.12998042297363283, 0.12985958862304686, 0.13036749267578124, 0.12995071411132814, 0.1288765411376953, 0.12879974365234376, 0.12941722106933592, 0.12930047607421874, 0.12891136169433592, 0.12978790283203126, 0.12877619934082032, 0.12878746032714844, 0.12919500732421876, 0.12896255493164063, 0.1289134063720703, 0.1290373077392578, 0.12975308227539062, 0.12882841491699218, 0.1290198974609375, 0.12901478576660155, 0.12898918151855468, 0.12967628479003906, 0.12916940307617186, 0.12887449645996094, 0.12921139526367187, 0.12869631958007813, 0.12922880554199218, 0.12872909545898437, 0.12871168518066406, 0.12892979431152343, 0.129185791015625, 0.12892672729492188, 0.1294254150390625, 0.13041868591308595, 0.12922265625, 0.12881613159179686, 0.12874342346191406, 0.12914994812011718, 0.12993023681640625, 0.12914892578125, 0.12977766418457032, 0.1297592315673828, 0.12924826049804689, 0.12885197448730468, 0.12885606384277343, 0.12906803894042967, 0.12960665893554688, 0.12895333862304686, 0.1296312255859375, 0.12864512634277345, 0.13046885681152343, 0.12909056091308593, 0.13064703369140626, 0.12966400146484375, 0.13024972534179688, 0.12901683044433593, 0.12863385009765624, 0.1289738311767578, 0.2638612365722656, 0.1288478698730469, 0.1288151092529297, 0.12867584228515624, 0.1286604766845703, 0.12864102172851563, 0.12869223022460938, 0.12870553588867187, 0.1286871032714844, 0.12933427429199218, 0.12895846557617188, 0.1287362518310547, 0.12880487060546875, 0.12846080017089845, 0.12952064514160155, 0.12865740966796874, 0.12858982849121095, 0.1286840362548828, 0.128827392578125, 0.12865228271484375, 0.13033267211914062, 0.1298913269042969, 0.12930662536621093, 0.12879769897460938, 0.12924006652832032, 0.12911514282226563, 0.12893696594238283, 0.12896563720703125, 0.1286840362548828, 0.12859596252441408, 0.1286553649902344, 0.12849356079101562, 0.12909567260742189, 0.129185791015625, 0.12871168518066406, 0.12875161743164062, 0.12851199340820313, 0.12922265625, 0.1306798095703125, 0.12952780151367188, 0.129396728515625, 0.12962509155273438, 0.12861952209472657, 0.12871168518066406, 0.1286543426513672, 0.12885708618164063, 0.12871475219726564, 0.12898918151855468, 0.12887551879882814, 0.12875263977050783, 0.1291130828857422, 0.12952268981933593, 0.13002137756347656, 0.12975410461425782, 0.12933427429199218, 0.12950425720214845, 0.1291673583984375, 0.12911001586914062, 0.12872294616699217, 0.12854170227050782, 0.12845362854003906, 0.12869223022460938, 0.12868812561035156, 0.26576177978515625, 0.12924826049804689, 0.12867788696289062, 0.1287782440185547, 0.12968960571289062, 0.12889497375488282, 0.12866969299316405, 0.1288099822998047, 0.12962098693847657, 0.12901580810546875, 0.12879359436035157, 0.12876390075683594, 0.1289564208984375, 0.12874240112304688, 0.1286871032714844, 0.12908851623535156, 0.1289134063720703, 0.12863591003417968, 0.1290577850341797, 0.1289635772705078, 0.12908441162109374, 0.12881715393066406, 0.12893798828125, 0.1303019561767578, 0.12936192321777343, 0.12894105529785158, 0.1289318389892578, 0.129227783203125, 0.12891136169433592, 0.12872294616699217, 0.1287587890625, 0.1295083465576172, 0.1288826904296875, 0.1295636444091797, 0.1291714630126953, 0.12932505798339844, 0.1291663360595703, 0.12864614868164062, 0.12883148193359376, 0.12890419006347656, 0.12887962341308593, 0.12895846557617188, 0.12895333862304686, 0.12924826049804689, 0.12880793762207032, 0.12882330322265625, 0.12863282775878906, 0.12880076599121093, 
0.12866764831542968, 0.1288140869140625, 0.12918885803222657, 0.12923085021972655, 0.12873216247558594, 0.12884378051757814, 0.128822265625, 0.12880793762207032, 0.12889190673828124, 0.12862361145019532, 0.12879872131347656, 0.12901785278320313, 0.12870655822753907, 0.12871168518066406, 0.1288826904296875, 0.26630654907226564, 0.1309654998779297, 0.1296506805419922, 0.1291356201171875, 0.128932861328125, 0.12939263916015625, 0.12916326904296874, 0.1299814453125, 0.1290997772216797, 0.12907008361816405, 0.12940390014648437, 0.12927078247070312, 0.12884991455078126, 0.12983091735839844, 0.13016986083984375, 0.1290373077392578, 0.12927999877929688, 0.129438720703125, 0.13061734008789064, 0.12889190673828124, 0.12984831237792968, 0.13007769775390626, 0.13092965698242187, 0.12898611450195313, 0.1293158416748047, 0.12955955505371095, 0.12935475158691406, 0.1288478698730469, 0.1289697265625, 0.12932403564453124, 0.12959744262695314, 0.12883763122558595, 0.1288212432861328, 0.12967628479003906, 0.12876287841796874, 0.12947251892089845, 0.12944998168945313, 0.12905574035644532, 0.12925234985351564, 0.12954010009765626, 0.12926771545410157, 0.12993536376953124, 0.12876185607910157, 0.12890725708007814, 0.12937216186523437, 0.12992716979980468, 0.13034597778320312, 0.12930764770507813, 0.12888064575195313, 0.12883865356445312, 0.12890316772460939, 0.12986265563964844, 0.12991384887695312, 0.1290188751220703, 0.1290076141357422, 0.12887962341308593, 0.1289758758544922, 0.12898611450195313, 0.1288642578125, 0.12870758056640624, 0.12963226318359375, 0.1289932861328125, 0.12938035583496094, 0.26711550903320314, 0.12889395141601562, 0.12987904357910157, 0.12996607971191407, 0.12963941955566408, 0.12896461486816407, 0.12874444580078126, 0.12914175415039061, 0.129512451171875, 0.12889804077148437, 0.12933938598632813, 0.12918885803222657, 0.1291653137207031, 0.12967526245117186, 0.13058149719238282, 0.12971212768554688, 0.12983705139160157, 0.1312491455078125, 0.12968960571289062, 0.1296855010986328, 0.12951449584960936, 0.12930458068847656, 0.12980633544921874, 0.12941004943847656, 0.12944691467285158, 0.12926669311523437, 0.12979507446289062, 0.12961485290527344, 0.1289553985595703, 0.12896563720703125, 0.12977459716796874, 0.13095936584472656, 0.13045452880859376, 0.13097267150878905, 0.12978483581542968, 0.13107609558105468, 0.13026611328125, 0.1297100830078125, 0.12907212829589843, 0.1287209014892578, 0.12875468444824217, 0.12891647338867188, 0.1297838134765625, 0.1291980743408203, 0.1292410888671875, 0.12913253784179687, 0.12873420715332032, 0.12888986206054687, 0.12877516174316406, 0.12898611450195313, 0.1289144287109375, 0.12875059509277345, 0.12870040893554688, 0.12886015319824218, 0.12891647338867188, 0.12871270751953126, 0.12871372985839843, 0.12924620056152344, 0.12940083312988282, 0.12916120910644532, 0.129939453125, 0.12903526306152344, 0.128827392578125, 0.2668564453125, 0.1297244110107422, 0.129291259765625, 0.12891647338867188, 0.1289871368408203, 0.12860211181640624, 0.1287884826660156, 0.12873420715332032, 0.12900863647460936, 0.12901683044433593, 0.1291663360595703, 0.1291724853515625, 0.12918885803222657, 0.12881715393066406, 0.12899122619628905, 0.1286604766845703, 0.1290997772216797, 0.12963839721679687, 0.12860415649414061, 0.12885299682617188, 0.1286973419189453, 0.128901123046875, 0.12870758056640624, 0.12876185607910157, 0.12874855041503908, 0.1287393341064453, 0.12990669250488282, 0.13078732299804688, 0.1296609344482422, 0.12924006652832032, 0.12881715393066406, 
0.12901580810546875, 0.1292769317626953, 0.1289758758544922, 0.12890316772460939, 0.12894514465332033, 0.12875672912597655, 0.12894720458984374, 0.12874137878417968, 0.12878437805175783, 0.12906803894042967, 0.12887347412109376, 0.12873829650878907, 0.1290496063232422, 0.1295636444091797, 0.12900044250488282, 0.12882432556152343, 0.12890931701660155, 0.12891853332519532, 0.1297643585205078, 0.1288028106689453, 0.12875570678710938, 0.12874137878417968, 0.12874444580078126, 0.12869017028808594, 0.12901274108886718, 0.12943257141113282, 0.12871270751953126, 0.12867071533203125, 0.12885708618164063, 0.12893594360351562, 0.1285867462158203, 0.12889292907714844]",tokens/s,7.629876237294903,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-66b,facebook/opt-66b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. 
If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-180B,tiiuae/falcon-180B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/tiiuae/falcon-180B/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-66779488-2166927254b30de812512db6;b4637e14-53d6-41ab-8655-698c95b875a3) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/tiiuae/falcon-180B/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like tiiuae/falcon-180B is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-1.3B,EleutherAI/gpt-neo-1.3B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-6.7b,EleutherAI/pythia-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-7B,Qwen/Qwen1.5-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2805.69856,8389.132288,0.0,7742.685184,7007.0144,s,10,5.757144287109376,0.5757144287109376,0.0011485729711129637,0.5757064819335938,0.5769704406738282,0.5771491607666016,0.5772921368408204,"[0.5760953979492187, 0.5769307250976563, 0.574836669921875, 0.5745257568359375, 0.574697998046875, 0.5738778076171875, 0.5753175659179688, 0.5767816162109375, 0.577327880859375, 0.5767528686523438]",tokens/s,444.66490196050984,kWh,6.783310471125591e-06,3.7157116531489013e-06,3.30976653669996e-05,4.3596687491274095e-05,tokens/kWh,5872005.758493431,MB,2805.69856,8389.132288,0.0,7742.685184,7283.984384,s,10,336.2841484375,33.62841484375,0.0041773861216769514,33.62973046875,33.63291328125,33.63334921875,33.63369796875,"[33.62066796875, 33.6314140625, 33.6304453125, 33.62453125, 33.627359375, 33.629015625, 33.63378515625, 33.63111328125, 33.623, 33.63281640625]",tokens/s,1.873415690056198,kWh,0.0003969375698617947,0.00021755745967517217,0.0019112099178555991,0.0025257049473925665,tokens/kWh,24943.531137727943,,s,629,340.93043640136693,0.5420197717032864,0.06847421137969394,0.5337415771484375,0.53422822265625,0.5344366455078124,1.1091660595703123,"[0.5336361083984374, 0.5341531982421875, 0.533359619140625, 0.5337293090820312, 0.53304931640625, 0.5336708984375, 0.5331292114257813, 0.5338419189453125, 0.5331640625, 0.5335398559570312, 0.53309033203125, 0.5335951538085938, 0.5331834716796875, 0.5338798217773437, 0.5333298950195312, 0.5336842041015625, 0.5331845092773437, 0.5336678466796875, 0.533411865234375, 0.53406103515625, 0.5336094970703125, 0.5338388671875, 0.5332828369140625, 0.5337896728515625, 0.5332971801757812, 0.533770263671875, 0.5334599609375, 0.5337487182617188, 0.5331834716796875, 0.5339166870117188, 0.5336627197265625, 0.5344092407226563, 0.5335090942382813, 0.5339432983398438, 0.533738525390625, 0.5342853393554687, 0.5333934326171875, 0.5340252075195312, 0.53340771484375, 0.534096923828125, 0.5334374389648437, 0.5337191162109375, 0.5333012084960937, 0.5337835693359375, 0.5332633666992187, 0.53411328125, 0.533443603515625, 0.5338327026367188, 0.5333759765625, 0.5339893798828125, 0.5333831787109375, 0.5338306274414063, 0.5334395141601562, 0.5340466918945312, 0.5333514404296875, 0.5337937622070312, 0.5338665161132813, 0.5344153442382813, 0.5337047119140625, 0.5338112182617187, 0.5332122192382812, 0.5337702026367187, 1.1116871337890626, 0.5333892822265625, 0.53395556640625, 0.5335818481445312, 0.534329345703125, 0.53378662109375, 0.533939208984375, 0.533485595703125, 0.5340098266601563, 0.533265380859375, 0.5337507934570312, 0.5338316650390625, 0.53416552734375, 0.533770263671875, 0.5342669067382813, 0.5334896850585937, 0.5339904174804687, 0.5334537963867187, 0.5338736572265625, 
0.5339002685546875, 0.5341122436523438, 0.5333739624023438, 0.5337057495117188, 0.5331763305664062, 0.5336084594726562, 0.5334323120117187, 0.5338009643554688, 0.5331425170898437, 0.5337620239257812, 0.5331527709960937, 0.5337323608398438, 0.5339801635742187, 0.5345730590820312, 0.53325927734375, 0.5336207275390625, 0.53326953125, 0.5340211791992188, 0.5333349609375, 0.5338777465820312, 0.53340673828125, 0.5337753295898438, 0.533338134765625, 0.533880859375, 0.533570556640625, 0.5340579833984375, 0.5336616821289063, 0.5373572998046875, 0.5339514770507813, 0.53390234375, 0.5336719360351563, 0.5342648315429688, 0.5339279174804688, 0.5340426025390625, 0.5336504516601562, 0.5338839721679688, 0.53328173828125, 0.534540283203125, 0.5336729736328125, 0.5342125854492188, 0.5336329956054687, 0.53383984375, 0.5335726318359375, 0.5343897705078124, 1.10948046875, 0.5335797729492188, 0.5341194458007813, 0.5339064331054687, 0.5345751342773437, 0.5335787353515625, 0.533949462890625, 0.5334251708984376, 0.5337794799804687, 0.5332059936523438, 0.5337763671875, 0.5334661254882812, 0.533986328125, 0.5332305908203125, 0.5337221069335938, 0.5334609985351563, 0.53401904296875, 0.5335316772460937, 0.5339371337890625, 0.53347021484375, 0.5337108764648437, 0.5331456298828126, 0.5338245239257813, 0.5349140625, 0.5346262817382812, 0.5334579467773437, 0.5338726196289062, 0.533796875, 0.5340897216796875, 0.5336565551757813, 0.5343047485351563, 0.5332715454101562, 0.534076416015625, 0.533580810546875, 0.5337415771484375, 0.5334824829101562, 0.53382861328125, 0.53319677734375, 0.5336668090820312, 0.5332664184570313, 0.5337743530273438, 0.5335418701171875, 0.5340856323242188, 0.5335429077148437, 0.53378662109375, 0.5334508056640626, 0.5341675415039062, 0.5349437255859375, 0.5338644409179687, 0.5339566040039062, 0.5342269287109375, 0.5335900268554687, 0.5341214599609375, 0.53355419921875, 0.5342976684570313, 0.5334230346679687, 0.5340139770507812, 0.5335992431640625, 0.5341634521484375, 0.5334210815429687, 0.5340436401367188, 0.5337569580078125, 0.5342617797851562, 1.1091947021484374, 0.5332183227539062, 0.5337579345703125, 0.533496826171875, 0.5339699096679688, 0.53334326171875, 0.5339320068359374, 0.5333401489257813, 0.533749755859375, 0.533454833984375, 0.533738525390625, 0.5335623779296875, 0.5348157348632813, 0.5332367553710937, 0.5340784301757813, 0.5337159423828125, 0.5338306274414063, 0.5333944091796875, 0.5338828735351563, 0.533201904296875, 0.5338593139648438, 0.53323681640625, 0.5340261840820313, 0.5336555786132813, 0.5336821899414063, 0.5331660766601563, 0.5336535034179688, 0.5331763305664062, 0.5336637573242188, 0.5330974731445313, 0.5336381225585938, 0.5332664184570313, 0.5338818359375, 0.533327880859375, 0.53427099609375, 0.5336790771484375, 0.5338746948242188, 0.533159912109375, 0.5337006225585937, 0.5332623291015625, 0.5337293090820312, 0.5337589721679687, 0.533981201171875, 0.533370849609375, 0.5360977783203125, 0.5334261474609375, 0.5341010131835937, 0.5335623779296875, 0.5340078125, 0.5334896850585937, 0.5340006103515625, 0.5336053466796875, 0.5344645385742187, 0.53421875, 0.5342853393554687, 0.5334138793945312, 0.5337979125976563, 0.53326953125, 0.533792724609375, 0.5334814453125, 0.5338665161132813, 0.5334456176757812, 0.53382861328125, 1.1090924072265624, 0.5336094970703125, 0.5341419677734375, 0.5334814453125, 0.5340355224609376, 0.53357666015625, 0.53401904296875, 0.5336771240234375, 0.5337230834960938, 0.5334425048828125, 0.5336268920898437, 0.5331640625, 0.5335726318359375, 0.533123046875, 
0.5340631103515625, 0.5331763305664062, 0.5340078125, 0.5347676391601562, 0.5339586791992188, 0.5332183227539062, 0.5338541870117187, 0.5333883056640625, 0.53372314453125, 0.5331834716796875, 0.5336309814453125, 0.5334537963867187, 0.534108154296875, 0.5333411865234375, 0.5341143188476563, 0.5333524780273438, 0.5335510864257812, 0.5337293090820312, 0.5341480712890625, 0.5335869140625, 0.5337671508789062, 0.533243896484375, 0.5338193969726562, 0.5337169799804687, 0.5342074584960937, 0.533265380859375, 0.5335838623046875, 0.533375, 0.5341593627929687, 0.5334948120117188, 0.5341030883789063, 0.5336299438476563, 0.5339105224609375, 0.5335091552734375, 0.5336759643554687, 0.5331793823242188, 0.5338460083007812, 0.5332551879882812, 0.5341522216796875, 0.5335869140625, 0.53411328125, 0.5335480346679687, 0.53422802734375, 0.5365042724609375, 0.5340682373046876, 0.533644287109375, 0.5340825805664062, 0.533528564453125, 0.5341306762695313, 1.1086192626953124, 0.5332972412109375, 0.534067138671875, 0.5337210693359375, 0.5342228393554688, 0.53347021484375, 0.53422900390625, 0.5334702758789063, 0.5339790649414062, 0.5337579345703125, 0.5341173706054687, 0.5335818481445312, 0.534245361328125, 0.5336678466796875, 0.5341265869140625, 0.5333944091796875, 0.5336135864257813, 0.5332551879882812, 0.5341911010742187, 0.533359619140625, 0.5341552734375, 0.5331834716796875, 0.53369140625, 0.5332572021484375, 0.53432421875, 0.533712890625, 0.5336708984375, 0.533106689453125, 0.533802001953125, 0.5334364013671875, 0.53359716796875, 0.5331845092773437, 0.53380810546875, 0.5332346801757812, 0.5336627197265625, 0.5332254638671875, 0.5337774047851562, 0.5336309814453125, 0.537302001953125, 0.5335510864257812, 0.5340999755859375, 0.5334763793945313, 0.5337241821289063, 0.5331824340820313, 0.5339197387695312, 0.53344970703125, 0.533875732421875, 0.53338623046875, 0.5339381713867187, 0.5334395141601562, 0.5338777465820312, 0.5334159545898437, 0.534012939453125, 0.5338378295898437, 0.5343323974609375, 0.534024169921875, 0.534361083984375, 0.5335521240234375, 0.5342996826171875, 0.5335305786132812, 0.533734375, 0.5334640502929687, 0.5342750854492188, 1.1099852294921875, 0.533232666015625, 0.5340067749023437, 0.5332183227539062, 0.5340108642578125, 0.5337364501953125, 0.534192138671875, 0.533570556640625, 0.5341460571289063, 0.533876708984375, 0.53406005859375, 0.5334681396484375, 0.5340528564453125, 0.5333984985351562, 0.53374462890625, 0.5331937255859375, 0.5337579345703125, 0.533475341796875, 0.5339535522460938, 0.533396484375, 0.53382861328125, 0.5333401489257813, 0.5337302856445313, 0.535394287109375, 0.5337927856445313, 0.5334507446289063, 0.5339443359375, 0.5337825317382813, 0.5344010009765625, 0.533728271484375, 0.5337845458984375, 0.5336329956054687, 0.5338839111328125, 0.5333759765625, 0.5338009643554688, 0.533201904296875, 0.5340794677734375, 0.5336320190429688, 0.5339146118164062, 0.53344873046875, 0.5339638061523437, 0.5336893310546875, 0.5338931274414063, 0.5337426147460937, 0.5341430053710937, 0.5335347290039063, 0.5341624145507813, 0.5345700073242188, 0.5347133178710938, 0.5338163452148438, 0.53465087890625, 0.5338040161132812, 0.5341941528320312, 0.533674072265625, 0.5342606811523437, 0.5343539428710937, 0.5342125854492188, 0.5335050048828125, 0.5339566040039062, 0.533570556640625, 0.5341276245117188, 0.5338695678710937, 0.5341880493164063, 1.111352294921875, 0.533518310546875, 0.534761474609375, 0.5335090942382813, 0.5338511352539063, 0.5336331176757813, 0.5341572265625, 0.5336279296875, 
0.5341102294921874, 0.5339033813476562, 0.53374462890625, 0.5332838134765625, 0.5336801147460938, 0.5332142333984375, 0.5337333984375, 0.5331947631835937, 0.534908935546875, 0.533190673828125, 0.5336279296875, 0.5330892944335938, 0.5340794677734375, 0.5336063842773437, 0.5338890380859375, 0.5336463623046875, 0.5338880004882812, 0.5336104736328126, 0.533712890625, 0.5332838134765625, 0.5341624145507813, 0.5335418701171875, 0.5341460571289063, 0.53351220703125, 0.5338890380859375, 0.533243896484375, 0.5342177124023437, 0.5337088012695312, 0.5343447265625, 0.533712890625, 0.5340774536132813, 0.5339156494140626, 0.5344481201171875, 0.5336882934570313, 0.5341992797851562, 0.5336084594726562, 0.533676025390625, 0.5333197021484375, 0.5338849487304688, 0.533917724609375, 0.53441943359375, 0.5336135864257813, 0.5341010131835937, 0.5335132446289063, 0.5340897216796875, 0.5336616821289063, 0.5339934692382813, 0.5337794799804687, 0.5340200805664063, 0.5335142211914062, 0.5342648315429688, 0.533865478515625, 0.5339801635742187, 0.5338695678710937, 0.534255615234375, 1.112015869140625, 0.5335879516601563, 0.534097900390625, 0.5332183227539062, 0.5336187133789062, 0.5332059936523438, 0.5337405395507813, 0.5333309326171874, 0.5338091430664063, 0.533423095703125, 0.5338306274414063, 0.533191650390625, 0.5339586791992188, 0.5338961791992187, 0.5337569580078125, 0.5333165893554688, 0.534349853515625, 0.5343836059570313, 0.5336043701171875, 0.5334517822265625, 0.5338685913085938, 0.5332326049804688, 0.5336350708007812, 0.5330892944335938, 0.5336309814453125, 0.5331548461914063, 0.5336053466796875, 0.5331824340820313, 0.5337999267578125, 0.5332408447265625, 0.533622802734375, 0.5338716430664062, 0.5345494995117187, 0.53351220703125, 0.5343057861328125, 0.5334180297851563, 0.5338480224609375, 0.53349169921875, 0.5338357543945312, 0.533622802734375, 0.533875732421875, 0.5334579467773437, 0.5337610473632812, 0.5334579467773437, 0.5340170288085937, 0.5332745971679688, 0.534091796875, 0.5333995361328125, 0.5338532104492187, 0.5333872680664062, 0.5340620727539063, 0.533886962890625, 0.5340108642578125, 0.5335654296875, 0.533928955078125, 0.5333514404296875, 0.533950439453125, 0.5334404907226562, 0.5343201293945312, 0.5335582885742187, 0.5337221069335938, 0.5335408935546875, 0.5341378784179688, 1.111615478515625, 0.5335418701171875, 0.534171630859375, 0.5334824829101562, 0.533771240234375, 0.5340958862304688, 0.5341583251953125, 0.5335234375, 0.5342811889648438, 0.5338716430664062, 0.53395556640625, 0.533296142578125, 0.5340877075195313, 0.53382861328125, 0.5342125854492188, 0.5335961303710938, 0.5341245727539062, 0.5336575927734375, 0.5344921875, 0.5339248657226563, 0.5340242309570312, 0.533560302734375, 0.5341531982421875, 0.5337302856445313, 0.5339627685546875, 0.5335162963867187, 0.535699462890625, 0.5336187133789062, 0.5340529174804688, 0.5334834594726563, 0.5338634033203125, 0.5333811645507812, 0.5340415649414062, 0.5334630126953125, 0.53372314453125, 0.5333606567382813, 0.5338726196289062, 0.5338736572265625, 0.5340108642578125, 0.5332828369140625, 0.5338992919921876, 0.5332766723632812, 0.5340753784179687, 0.5338142700195313, 0.5339904174804687, 0.5335613403320313, 0.5342269287109375, 0.533306396484375, 0.5340200805664063, 0.5333984985351562, 0.5338890380859375, 0.5335675048828125, 0.5339054565429687, 0.5333565063476563, 0.5340200805664063, 0.5336309814453125, 0.5339617309570313, 0.5336258544921875, 0.5341859741210937, 0.5334886474609375, 0.5339893798828125, 0.53378662109375, 
0.534213623046875]",tokens/s,1.8449511479212646,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.4b,EleutherAI/pythia-1.4b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-13b,facebook/opt-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,8,8,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/8/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e 
huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a31a5-47ff911e2f52e0f35567858c;57263f3c-3583-4f44-9615-39bd96e3c0b7) Repository Not Found for url: https://huggingface.co/8/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 8 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-72B,Qwen/Qwen-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,databricks/dbrx-base,databricks/dbrx-base,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cbe5c-0f1fb3a0510fbf70750fa837;6e48f005-0c93-4c33-acaf-0f7b888504cc) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like databricks/dbrx-base is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,microsoft/rho-math-1b-v0.1,microsoft/rho-math-1b-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1416.183808,1705.508864,0.0,1059.06176,901.251072,s,10,1.2556827774047852,0.12556827774047852,0.0020066947239277956,0.12461985778808593,0.12691755523681642,0.12903935165405273,0.13073678878784178,"[0.13116114807128906, 0.1245798110961914, 0.12455474853515625, 0.12628569793701172, 0.126446044921875, 0.12463142395019532, 0.12460829162597656, 0.12491244506835937, 0.12422150421142578, 0.12428166198730468]",tokens/s,2038.731474274853,kWh,1.4767323517137103e-06,8.091788095589435e-07,5.457882838524886e-06,7.74379399979754e-06,tokens/kWh,33058730.64375074,MB,1416.47872,1705.508864,0.0,1059.06176,931.976704,s,10,75.24034423828124,7.524034423828125,0.018117587645973928,7.51759228515625,7.546301123046875,7.555124682617188,7.562183530273438,"[7.5639482421875, 7.54434033203125, 7.51252392578125, 7.5294287109375, 7.5317822265625, 7.49836865234375, 7.51694482421875, 7.50782275390625, 7.51704736328125, 7.51813720703125]",tokens/s,8.373167432685198,kWh,8.920738770729968e-05,4.889187051034242e-05,0.00032243887947867866,0.0004605381376963208,tokens/kWh,136796.48837582752,,s,629,76.24045568847656,0.12120899155560659,0.014857766034498806,0.119236572265625,0.12046213073730469,0.121122216796875,0.24360714599609376,"[0.12457164764404297, 0.12292813110351562, 0.12142694091796875, 0.12034457397460938, 0.12029849243164062, 0.12040806579589844, 0.12176793670654297, 0.12074700927734375, 0.11929190063476562, 0.11991244506835938, 0.11920486450195313, 0.12124877166748046, 0.12120063781738281, 0.120416259765625, 0.12205363464355469, 0.1201418228149414, 0.11985100555419922, 0.1191731185913086, 0.12010905456542968, 0.12073577880859375, 0.11977008056640626, 0.1192069091796875, 0.11915058898925782, 0.11936870574951172, 0.11920281219482422, 0.11884953308105468, 0.11897650909423828, 0.11890380859375, 0.11936460876464844, 0.11948953247070312, 0.11988787078857421, 0.11970662689208984, 0.11927040100097656, 0.11931136322021485, 0.11918540954589844, 0.11902054595947266, 0.11925606536865234, 0.11928268432617188, 0.1189775390625, 0.12040608215332031, 0.12145555114746094, 0.12119859313964844, 0.11934515380859376, 0.11921715545654298, 0.11957453155517578, 0.11955404663085938, 0.12047564697265625, 0.1195704345703125, 0.11987558746337891, 0.11963187408447265, 0.12172492980957031, 0.12081664276123047, 0.1199636459350586, 0.11969638061523437, 0.119478271484375, 0.11919974517822265, 0.11901952362060547, 0.11969843292236328, 0.11932466888427734, 0.11939430236816406, 0.11932160186767578, 0.11914035034179687, 0.2437795867919922, 0.11910451507568359, 0.11929497528076172, 0.11924992370605468, 0.11924582672119141, 0.1191229476928711, 0.1206702117919922, 0.12058009338378907, 0.11906559753417968, 0.11979878234863281, 0.1204142074584961, 0.120774658203125, 0.12038349151611329, 0.12099174499511718, 
0.12017356872558593, 0.12051148986816407, 0.11986022186279296, 0.11982848358154297, 0.12144435119628906, 0.1197875213623047, 0.12066099548339844, 0.11986022186279296, 0.11963085174560546, 0.12066918182373047, 0.12106034851074218, 0.11989606475830078, 0.11947724914550781, 0.12219084930419923, 0.12116684722900391, 0.1208616943359375, 0.12017459106445312, 0.1197844467163086, 0.11975885009765624, 0.11948544311523437, 0.1193359375, 0.11977318572998047, 0.11921920013427735, 0.11890995025634765, 0.11913420867919922, 0.11896729278564454, 0.11922943878173828, 0.11928575897216796, 0.11888127899169922, 0.11916902160644531, 0.11918438720703126, 0.11927756500244141, 0.11924889373779297, 0.11915980529785156, 0.11925708770751953, 0.1192959976196289, 0.11954688262939453, 0.11891097259521484, 0.11927654266357422, 0.11913215637207031, 0.11925504302978515, 0.1196943359375, 0.11949980926513672, 0.11920687866210937, 0.11947622680664062, 0.11929190063476562, 0.11921920013427735, 0.12117094421386719, 0.11926937866210938, 0.24365055847167968, 0.1189969940185547, 0.11894374084472656, 0.11909529876708984, 0.11894271850585937, 0.11864473724365235, 0.11887718200683593, 0.11882701110839844, 0.11909324645996094, 0.11965235137939453, 0.11938406372070312, 0.11919974517822265, 0.11909120178222657, 0.11923046112060547, 0.11929708862304687, 0.11920992279052735, 0.11922946929931641, 0.11920687866210937, 0.11895603179931641, 0.11921510314941407, 0.11924070739746094, 0.11940147399902344, 0.11908505249023438, 0.11927961730957032, 0.11908505249023438, 0.11922227478027343, 0.11948748779296875, 0.11879219055175781, 0.11928473663330078, 0.11918438720703126, 0.11904819488525391, 0.1194260482788086, 0.11940147399902344, 0.11873382568359375, 0.11902361297607422, 0.12077056121826171, 0.11962060546875, 0.11954380798339843, 0.11931648254394531, 0.11892121887207031, 0.11890585327148437, 0.11919769287109375, 0.11923865509033203, 0.11895500946044922, 0.11905228424072266, 0.11905433654785157, 0.11923967742919922, 0.12057087707519532, 0.11927552032470704, 0.11903180694580077, 0.11901952362060547, 0.11904819488525391, 0.11917005157470703, 0.11913728332519531, 0.119225341796875, 0.12148429107666016, 0.11932569885253906, 0.11935846710205078, 0.1191720962524414, 0.1192273941040039, 0.11932876586914062, 0.11924992370605468, 0.11925606536865234, 0.24569036865234375, 0.11900313568115234, 0.11910758209228516, 0.1191229476928711, 0.11929503631591797, 0.11934611511230468, 0.11907071685791015, 0.11920281219482422, 0.11885465240478515, 0.11900006103515624, 0.11911885070800782, 0.1191546859741211, 0.11912806701660156, 0.11937484741210938, 0.1194434585571289, 0.11925606536865234, 0.12019712066650391, 0.12117810821533204, 0.12110336303710938, 0.11963801574707031, 0.11959193420410157, 0.11896627044677735, 0.11922329711914062, 0.11913420867919922, 0.11982438659667968, 0.11933491516113282, 0.11925094604492187, 0.1192959976196289, 0.11907071685791015, 0.1207193603515625, 0.12125491333007812, 0.11938508605957031, 0.1194076156616211, 0.1193338851928711, 0.11934719848632812, 0.11946189117431641, 0.1193482208251953, 0.11920492553710937, 0.11926624298095703, 0.11927142333984375, 0.11934207916259766, 0.11924992370605468, 0.11892121887207031, 0.11940863800048829, 0.119046142578125, 0.12049715423583984, 0.11979878234863281, 0.11931954956054687, 0.11911270141601563, 0.11936768341064453, 0.11907891082763672, 0.11921715545654298, 0.11925094604492187, 0.11922124481201171, 0.11953971099853515, 0.12125491333007812, 0.12110749053955078, 0.1192232666015625, 
0.11931136322021485, 0.11899903869628906, 0.12022374725341797, 0.11995238494873046, 0.12026982116699218, 0.247546875, 0.11950592041015624, 0.12091497802734374, 0.12060975646972656, 0.12113203430175781, 0.12091596984863281, 0.12082688140869141, 0.12083404541015624, 0.1219061737060547, 0.12209458923339844, 0.12265574645996094, 0.12046131134033203, 0.11928371429443359, 0.11902365112304687, 0.11975062561035156, 0.11916390228271484, 0.11981926727294921, 0.11897856140136719, 0.11971686553955078, 0.11893145751953126, 0.11913318634033203, 0.11891302490234375, 0.11906358337402344, 0.11885667419433593, 0.11948134613037109, 0.11907788848876953, 0.11915366363525391, 0.1191884765625, 0.11917619323730469, 0.11935641479492187, 0.11914035034179687, 0.11885260772705078, 0.11923353576660156, 0.11925504302978515, 0.11878912353515625, 0.11872665405273437, 0.11999334716796875, 0.11964211273193359, 0.11909737396240234, 0.11932262420654297, 0.119236572265625, 0.11907481384277344, 0.1190799331665039, 0.1189908447265625, 0.11900211334228515, 0.1191956787109375, 0.1193440933227539, 0.1192652816772461, 0.11916185760498046, 0.1206671371459961, 0.11945881652832031, 0.11904307556152344, 0.11939942169189453, 0.11916287994384765, 0.119119873046875, 0.11886386871337891, 0.11886386871337891, 0.11877785491943359, 0.11952845001220704, 0.11905228424072266, 0.11897344207763672, 0.11872563171386719, 0.11880754852294922, 0.24371711730957032, 0.11878604888916015, 0.11898675537109375, 0.11895603179931641, 0.1188629150390625, 0.11867436981201172, 0.11919667053222656, 0.11875635528564453, 0.11883110046386719, 0.11889871978759765, 0.1191341781616211, 0.11897241973876953, 0.11877581024169923, 0.11888025665283203, 0.11885772705078125, 0.11877273559570313, 0.11894477081298828, 0.11868057250976563, 0.11871231842041016, 0.11883929443359376, 0.11886080169677735, 0.11869900512695312, 0.12047666931152344, 0.11934515380859376, 0.11869798278808594, 0.11880044555664063, 0.11878803253173828, 0.1189969940185547, 0.11894886779785156, 0.11878399658203125, 0.12051967620849609, 0.11952333068847656, 0.11878399658203125, 0.11913420867919922, 0.11862118530273437, 0.118830078125, 0.11917721557617188, 0.11974451446533203, 0.11991756439208984, 0.119119873046875, 0.118866943359375, 0.11871743774414062, 0.11889663696289063, 0.11874201965332032, 0.11896115112304688, 0.11909734344482421, 0.11934003448486329, 0.11873280334472656, 0.11927859497070313, 0.1189017562866211, 0.1192652816772461, 0.11888742065429687, 0.11881472015380859, 0.11897138977050781, 0.11900927734375, 0.11879116821289062, 0.11915366363525391, 0.11920281219482422, 0.11915058898925782, 0.1191178207397461, 0.11896422576904297, 0.1189959716796875, 0.1191546859741211, 0.24360858154296874, 0.1190860824584961, 0.11949056243896485, 0.11932466888427734, 0.11897856140136719, 0.12082278442382813, 0.11958783721923828, 0.11911065673828125, 0.11961862182617188, 0.11913311767578125, 0.11897548675537109, 0.11908403015136719, 0.11890585327148437, 0.11893145751953126, 0.11908403015136719, 0.11909324645996094, 0.1192959976196289, 0.119299072265625, 0.11905023956298828, 0.11916390228271484, 0.1194229736328125, 0.11900006103515624, 0.11926016235351562, 0.11916902160644531, 0.1189027862548828, 0.11916185760498046, 0.11893452453613282, 0.11966259002685548, 0.11899494171142579, 0.1189570541381836, 0.11962777709960938, 0.1197875213623047, 0.11925196838378906, 0.11938201904296875, 0.1193338851928711, 0.11967692565917969, 0.11944652557373046, 0.11901542663574219, 0.11911577606201172, 0.11922022247314454, 
0.11908710479736329, 0.1187583999633789, 0.12030668640136719, 0.11964620971679688, 0.11896729278564454, 0.1190860824584961, 0.11880140686035157, 0.1189969940185547, 0.11937484741210938, 0.11919155120849609, 0.12108595275878906, 0.12014591979980468, 0.11927244567871094, 0.11913113403320312, 0.11914857482910156, 0.11933999633789062, 0.11990835571289063, 0.11933081817626953, 0.12017356872558593, 0.11968000030517578, 0.11910553741455078, 0.11916902160644531, 0.11892530822753906, 0.24451583862304688, 0.11914854431152344, 0.1193175048828125, 0.11939225769042969, 0.11916492462158203, 0.11957759857177734, 0.1194229736328125, 0.11930931091308594, 0.11903488159179687, 0.11910451507568359, 0.11907584381103516, 0.1191751708984375, 0.12042444610595703, 0.11948851013183594, 0.11933491516113282, 0.11932364654541015, 0.118940673828125, 0.11926220703125, 0.11912397003173827, 0.11893862152099609, 0.1188116455078125, 0.11887615966796874, 0.119299072265625, 0.1189591064453125, 0.11873792266845704, 0.12033638763427734, 0.11966361236572266, 0.11939635467529297, 0.11916799926757812, 0.11972914886474609, 0.11923046112060547, 0.11913420867919922, 0.11927347564697266, 0.11905843353271485, 0.11928371429443359, 0.11918950653076171, 0.11906867218017578, 0.11889049530029297, 0.11908403015136719, 0.11913011169433593, 0.11903385925292968, 0.11906150054931641, 0.11886386871337891, 0.11895807647705078, 0.11882189178466797, 0.11912908935546875, 0.11897856140136719, 0.11894477081298828, 0.11876557159423828, 0.11893965148925781, 0.11924172973632813, 0.11886386871337891, 0.11928883361816406, 0.11904307556152344, 0.11900723266601562, 0.11878195190429687, 0.11875635528564453, 0.11862322998046874, 0.11873996734619141, 0.11917005157470703, 0.1189570541381836, 0.11894579315185547, 0.12027187347412109, 0.24360345458984375, 0.11901952362060547, 0.1191014404296875, 0.11927244567871094, 0.11907481384277344, 0.11987353515625, 0.12033126068115234, 0.11920384216308594, 0.1190287322998047, 0.11961650848388672, 0.11970867156982422, 0.11927961730957032, 0.11992371368408203, 0.1196933135986328, 0.11933695983886719, 0.1192959976196289, 0.11914444732666016, 0.11929804992675781, 0.11933286285400391, 0.11891404724121094, 0.11937894439697265, 0.11968409729003907, 0.11939020538330078, 0.11924992370605468, 0.11931136322021485, 0.11917005157470703, 0.11934207916259766, 0.11891814422607422, 0.11906559753417968, 0.11898982238769532, 0.1191352310180664, 0.1193543701171875, 0.12004045104980468, 0.11920496368408204, 0.11923343658447266, 0.11935132598876953, 0.11902460479736328, 0.1189222412109375, 0.11926732635498047, 0.11971788787841797, 0.11941683197021484, 0.11968409729003907, 0.1194424285888672, 0.11935743713378906, 0.11975987243652343, 0.120447998046875, 0.11971788787841797, 0.11959603118896485, 0.12046540832519531, 0.11941375732421874, 0.11895500946044922, 0.11876659393310547, 0.11868262481689452, 0.1189222412109375, 0.11881267547607421, 0.11860889434814453, 0.1187430419921875, 0.11915058898925782, 0.11882086181640625, 0.11895603179931641, 0.11968102264404297, 0.11917721557617188, 0.11907686614990234, 0.24344166564941405, 0.11888742065429687, 0.1188853759765625, 0.11914854431152344, 0.11891506958007812, 0.11891404724121094, 0.11905023956298828, 0.1190113296508789, 0.11885772705078125, 0.11911065673828125, 0.11948134613037109, 0.11934617614746093, 0.11914956665039063, 0.11941273498535156, 0.11947315216064452, 0.1198571548461914, 0.11957350158691406, 0.12054937744140624, 0.12015001678466797, 0.11967180633544922, 0.1200865249633789, 0.1194434585571289, 
0.11917005157470703, 0.11993395233154297, 0.11950182342529297, 0.12140646362304687, 0.1204111328125, 0.11936262512207031, 0.11944236755371093, 0.11988172912597657, 0.11939839935302735, 0.1193912353515625, 0.11940249633789063, 0.1190297622680664, 0.11965542602539063, 0.11903897857666015, 0.11946701049804688, 0.11888540649414063, 0.1192386245727539, 0.1189591064453125, 0.11945471954345703, 0.11889868927001954, 0.1190287322998047, 0.11890380859375, 0.11901644897460938, 0.11902259063720703, 0.11903794860839843, 0.11907481384277344, 0.11954585266113281, 0.11895097351074219, 0.11885465240478515, 0.1198826904296875, 0.11971379089355469, 0.11895398712158203, 0.11973734283447265, 0.11900313568115234, 0.11912397003173827, 0.11940966033935548, 0.11896524810791016, 0.11901952362060547, 0.11930623626708985, 0.118687744140625, 0.11891506958007812]",tokens/s,8.250213017746573,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,openai-community/gpt2,openai-community/gpt2,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPT2LMHeadModel does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. 
Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,togethercomputer/RedPajama-INCITE-Base-7B-v0.1,togethercomputer/RedPajama-INCITE-Base-7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-30b,huggyllama/llama-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,6397.865984,20902.838272,0.0,20256.391168,19273.711616,s,10,26.880439941406248,2.6880439941406253,0.0033477874655363365,2.6871883544921875,2.692009619140625,2.6933998291015624,2.6945119970703124,"[2.685803955078125, 2.6947900390625, 2.683811767578125, 2.686383544921875, 2.68633642578125, 2.68401416015625, 2.6879931640625, 2.68876318359375, 2.69170068359375, 2.690843017578125]",tokens/s,95.23653651429312,kWh,3.17124683658282e-05,1.7378374240543054e-05,0.0001501383978884052,0.00019922924049477648,tokens/kWh,1284951.9446253772,MB,6401.134592,20902.838272,0.0,20256.391168,19862.692352,s,10,1586.032109375,158.60321093750002,0.015321928299545614,158.5981796875,158.6242765625,158.62938828125,158.63347765625,"[158.6345, 158.613953125, 158.592734375, 158.623140625, 158.5865, 158.597484375, 158.605109375, 158.59528125, 158.598875, 158.58453125]",tokens/s,0.3972176832209664,kWh,0.0018724184694555073,0.0010262522225316024,0.008901508065645191,0.0118001787576323,tokens/kWh,5338.902172075308,,s,629,1607.7030268554693,2.555966656367995,0.3193652798169538,2.51726953125,2.51903720703125,2.519619970703125,5.2035773632812505,"[2.5194208984375, 2.518276123046875, 2.516651123046875, 2.517525390625, 2.51843896484375, 2.517624755859375, 2.52004150390625, 2.52022265625, 2.521489501953125, 2.5164267578125, 2.5175634765625, 2.517775390625, 2.519953369140625, 2.51852197265625, 2.5173544921875, 2.519920654296875, 2.52021044921875, 2.516950927734375, 2.5166611328125, 2.518200439453125, 2.52010693359375, 2.518486083984375, 2.519525390625, 2.516905029296875, 2.517927001953125, 2.5164677734375, 2.517693359375, 2.518721435546875, 2.518928466796875, 2.51907470703125, 2.519510986328125, 2.518275146484375, 2.517242919921875, 2.516676513671875, 2.5165732421875, 2.5177783203125, 2.517148681640625, 2.51657421875, 2.516912109375, 2.518000732421875, 2.517034912109375, 2.516802490234375, 2.51715185546875, 2.51765966796875, 2.517441650390625, 2.51902783203125, 2.518970458984375, 2.5190830078125, 2.518890380859375, 2.51803857421875, 2.518179931640625, 2.51873486328125, 2.517571533203125, 2.517244873046875, 2.516959228515625, 2.516928466796875, 2.5168681640625, 2.5169306640625, 2.517191650390625, 2.517224365234375, 2.517115966796875, 2.516737060546875, 5.213111328125, 2.516869140625, 2.517271484375, 2.516408203125, 2.51673486328125, 2.51637255859375, 2.5162373046875, 2.516429931640625, 2.5193134765625, 2.518941650390625, 2.519627685546875, 2.5193779296875, 2.51966064453125, 2.51765966796875, 2.51892333984375, 2.516537353515625, 2.517127197265625, 2.519396240234375, 2.5196083984375, 2.518447021484375, 2.51694091796875, 2.517104736328125, 2.516771728515625, 2.51951318359375, 2.519360595703125, 
2.519175048828125, 2.5198193359375, 2.5188720703125, 2.516715576171875, 2.51652001953125, 2.516981689453125, 2.51901025390625, 2.517675048828125, 2.516708251953125, 2.51647998046875, 2.51678515625, 2.517172119140625, 2.516953125, 2.51710986328125, 2.517333984375, 2.5174814453125, 2.517621826171875, 2.5172724609375, 2.517373046875, 2.5174833984375, 2.517396484375, 2.51939111328125, 2.517060546875, 2.5173310546875, 2.517473388671875, 2.51749169921875, 2.517325927734375, 2.517126220703125, 2.517371826171875, 2.517205078125, 2.51726953125, 2.51808056640625, 2.517114990234375, 2.51685986328125, 2.516756591796875, 2.517073974609375, 2.51732275390625, 2.51730224609375, 5.20315478515625, 2.5170166015625, 2.517244873046875, 2.517020751953125, 2.517108642578125, 2.51638671875, 2.51642578125, 2.517214111328125, 2.516769775390625, 2.51730029296875, 2.51892529296875, 2.519654296875, 2.518119384765625, 2.5167421875, 2.51654150390625, 2.5173125, 2.51736669921875, 2.51793310546875, 2.5171240234375, 2.51818603515625, 2.51892431640625, 2.5187666015625, 2.51812353515625, 2.51782763671875, 2.5180478515625, 2.517296142578125, 2.516853759765625, 2.517425048828125, 2.5174833984375, 2.5169951171875, 2.516545654296875, 2.517794921875, 2.5171640625, 2.51698388671875, 2.51639501953125, 2.51702685546875, 2.516306884765625, 2.5166806640625, 2.51652001953125, 2.517170166015625, 2.516893798828125, 2.516665283203125, 2.516609130859375, 2.516989013671875, 2.51647998046875, 2.516822021484375, 2.51631201171875, 2.51747216796875, 2.517741455078125, 2.518084716796875, 2.517223388671875, 2.51799951171875, 2.51795654296875, 2.518109130859375, 2.51673388671875, 2.517454833984375, 2.51732177734375, 2.51715283203125, 2.516638671875, 2.517203857421875, 2.516884521484375, 2.51759912109375, 2.519858154296875, 5.20646044921875, 2.519667724609375, 2.520236083984375, 2.519354248046875, 2.519604248046875, 2.51765771484375, 2.518856689453125, 2.5197373046875, 2.51866015625, 2.51622802734375, 2.517433349609375, 2.517887939453125, 2.517296142578125, 2.519244873046875, 2.51947509765625, 2.51691015625, 2.517485595703125, 2.51801708984375, 2.51822998046875, 2.518202392578125, 2.518054931640625, 2.517960693359375, 2.518828125, 2.517103515625, 2.516676513671875, 2.51656494140625, 2.5168916015625, 2.516701171875, 2.51679541015625, 2.516830322265625, 2.517245849609375, 2.51702685546875, 2.517789794921875, 2.5177548828125, 2.517986328125, 2.519099365234375, 2.51782666015625, 2.517256103515625, 2.51816845703125, 2.5177353515625, 2.517129150390625, 2.517339111328125, 2.51709130859375, 2.517085205078125, 2.51757568359375, 2.517266357421875, 2.51751123046875, 2.518306884765625, 2.518222900390625, 2.51692236328125, 2.517740478515625, 2.517210205078125, 2.5171865234375, 2.51708935546875, 2.517200927734375, 2.51718359375, 2.51808349609375, 2.517579833984375, 2.517981201171875, 2.518381591796875, 2.517424072265625, 2.517716064453125, 2.517458984375, 5.20374169921875, 2.517011474609375, 2.51717333984375, 2.51589111328125, 2.516104248046875, 2.51622705078125, 2.51666748046875, 2.51635400390625, 2.516790283203125, 2.516591552734375, 2.517108642578125, 2.5161943359375, 2.516461669921875, 2.515986328125, 2.516514892578125, 2.51587890625, 2.516221923828125, 2.516361328125, 2.517339111328125, 2.51677490234375, 2.517726318359375, 2.519022705078125, 2.518340576171875, 2.516381591796875, 2.5166611328125, 2.5175654296875, 2.51858544921875, 2.518182861328125, 2.519140380859375, 2.516368408203125, 2.5173974609375, 2.516633544921875, 2.51689990234375, 2.516451416015625, 
2.51803759765625, 2.519834716796875, 2.518279052734375, 2.516961181640625, 2.5182822265625, 2.51641748046875, 2.51694482421875, 2.5169111328125, 2.51719580078125, 2.5168271484375, 2.516947998046875, 2.5164697265625, 2.5166357421875, 2.51717724609375, 2.516738037109375, 2.5174794921875, 2.5177138671875, 2.51707080078125, 2.517907470703125, 2.51693359375, 2.518024169921875, 2.51669921875, 2.517652587890625, 2.51960009765625, 2.51852294921875, 2.5182841796875, 2.517738525390625, 2.518305908203125, 2.51839794921875, 5.20286328125, 2.518064208984375, 2.5179228515625, 2.51654052734375, 2.517392333984375, 2.517768310546875, 2.517991455078125, 2.516991943359375, 2.51664697265625, 2.516994140625, 2.516967529296875, 2.51681689453125, 2.516999267578125, 2.517242919921875, 2.51738427734375, 2.5165751953125, 2.516220947265625, 2.516633544921875, 2.516949951171875, 2.517053466796875, 2.5165966796875, 2.51759716796875, 2.516681640625, 2.516168701171875, 2.5164677734375, 2.51702978515625, 2.5176904296875, 2.5161298828125, 2.51717333984375, 2.518159423828125, 2.51761962890625, 2.5179013671875, 2.517589111328125, 2.5173984375, 2.516906005859375, 2.516503662109375, 2.516475830078125, 2.51711279296875, 2.519480224609375, 2.5183896484375, 2.5181982421875, 2.518749267578125, 2.518531005859375, 2.518134765625, 2.51865087890625, 2.518466552734375, 2.5166396484375, 2.516759521484375, 2.516642822265625, 2.518162353515625, 2.517697509765625, 2.516461669921875, 2.516862060546875, 2.5173310546875, 2.517142578125, 2.516802490234375, 2.51664599609375, 2.51910546875, 2.520224853515625, 2.518654052734375, 2.517782470703125, 2.517315673828125, 2.517593017578125, 5.2058564453125, 2.517256103515625, 2.516883544921875, 2.516770751953125, 2.517991455078125, 2.51818505859375, 2.517248046875, 2.517381103515625, 2.517410888671875, 2.518013916015625, 2.51770263671875, 2.517663818359375, 2.51745068359375, 2.5174189453125, 2.5167841796875, 2.51723779296875, 2.5164462890625, 2.517445556640625, 2.516367431640625, 2.5164462890625, 2.51627734375, 2.51877685546875, 2.517401611328125, 2.51765771484375, 2.516915283203125, 2.517138427734375, 2.516568115234375, 2.516221923828125, 2.516367431640625, 2.517359619140625, 2.519510986328125, 2.51938720703125, 2.518856689453125, 2.519548828125, 2.51677392578125, 2.5177978515625, 2.517477294921875, 2.518878173828125, 2.5189775390625, 2.518950927734375, 2.518413330078125, 2.5166806640625, 2.516497314453125, 2.517235595703125, 2.518279052734375, 2.5198427734375, 2.51673486328125, 2.516912109375, 2.516906005859375, 2.51755517578125, 2.517419921875, 2.517097412109375, 2.516971435546875, 2.51842138671875, 2.517538818359375, 2.5178203125, 2.5180068359375, 2.517959716796875, 2.517367919921875, 2.517199951171875, 2.5175673828125, 2.517475341796875, 2.51647998046875, 5.206263671875, 2.51768212890625, 2.518013916015625, 2.51671240234375, 2.516947021484375, 2.51635302734375, 2.5160029296875, 2.516082763671875, 2.5166357421875, 2.519185302734375, 2.520660888671875, 2.520238037109375, 2.518698974609375, 2.5163837890625, 2.516863037109375, 2.51846142578125, 2.518802490234375, 2.518381591796875, 2.516673583984375, 2.516651123046875, 2.517413818359375, 2.51720703125, 2.516526123046875, 2.516347900390625, 2.516926513671875, 2.51696630859375, 2.516371337890625, 2.51662548828125, 2.517037109375, 2.518067138671875, 2.51704833984375, 2.517189697265625, 2.516686767578125, 2.516638671875, 2.516578369140625, 2.51708935546875, 2.517098388671875, 2.5169755859375, 2.5169140625, 2.51700830078125, 2.517178466796875, 
2.51765771484375, 2.517274658203125, 2.516906982421875, 2.51740576171875, 2.51774462890625, 2.51700830078125, 2.517297119140625, 2.51739453125, 2.518012939453125, 2.516905029296875, 2.51756640625, 2.51799853515625, 2.51736669921875, 2.517075927734375, 2.51706982421875, 2.517170166015625, 2.51795556640625, 2.516641845703125, 2.5179892578125, 2.519339111328125, 2.519236572265625, 2.517425048828125, 5.20979345703125, 2.516677734375, 2.51662939453125, 2.516441162109375, 2.51880029296875, 2.516022216796875, 2.51607763671875, 2.516926513671875, 2.5178603515625, 2.516306884765625, 2.5163828125, 2.51637353515625, 2.517161865234375, 2.516613037109375, 2.516834228515625, 2.51662548828125, 2.517593994140625, 2.515980224609375, 2.5162177734375, 2.516642822265625, 2.517887939453125, 2.51702587890625, 2.517031005859375, 2.517222412109375, 2.517981201171875, 2.517895263671875, 2.51740576171875, 2.517854248046875, 2.5180498046875, 2.51757373046875, 2.518212646484375, 2.517984375, 2.518205322265625, 2.51793408203125, 2.51681689453125, 2.5167216796875, 2.517544921875, 2.516486083984375, 2.5170185546875, 2.517210205078125, 2.517508056640625, 2.51639697265625, 2.516465576171875, 2.517015625, 2.5180283203125, 2.51659375, 2.516681640625, 2.51827099609375, 2.518345703125, 2.5169111328125, 2.51719580078125, 2.51778759765625, 2.520734619140625, 2.519320556640625, 2.5171875, 2.517358642578125, 2.51997900390625, 2.518500244140625, 2.516546630859375, 2.517432373046875, 2.5194833984375, 2.519772216796875, 2.519079833984375, 5.2087294921875, 2.517329833984375, 2.5174794921875, 2.516798583984375, 2.51810302734375, 2.5172890625, 2.51738818359375, 2.5166357421875, 2.51726953125, 2.516717529296875, 2.5166181640625, 2.517689453125, 2.51707177734375, 2.516453369140625, 2.516779052734375, 2.516828125, 2.516937744140625, 2.517130126953125, 2.516748291015625, 2.516958251953125, 2.517098388671875, 2.517117919921875, 2.516738037109375, 2.51713330078125, 2.516843505859375, 2.516346923828125, 2.516989013671875, 2.517284912109375, 2.516630615234375, 2.516404296875, 2.516778076171875, 2.51865185546875, 2.518513671875, 2.51744677734375, 2.517791748046875, 2.51757470703125, 2.516971435546875, 2.51658740234375, 2.51664794921875, 2.5177119140625, 2.517885986328125, 2.51691015625, 2.51734326171875, 2.516971435546875, 2.517379150390625, 2.5163837890625, 2.516974609375, 2.516904052734375, 2.517392333984375, 2.51668798828125, 2.517559326171875, 2.51702587890625, 2.517056396484375, 2.5166611328125, 2.517138427734375, 2.517266357421875, 2.517547119140625, 2.51685693359375, 2.517096435546875, 2.5175234375, 2.51769140625, 2.51662744140625, 2.51719580078125]",tokens/s,0.3912414105671436,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,togethercomputer/RedPajama-INCITE-Base-3B-v1,togethercomputer/RedPajama-INCITE-Base-3B-v1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = 
Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,mistralai/Mixtral-8x7B-v0.1,mistralai/Mixtral-8x7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,r,r,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/r/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a3037-169961507d96042f1d50be1c;9c56a323-2544-400c-b183-579825aa9fa2) Repository Not Found for url: https://huggingface.co/r/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: r is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-12b,EleutherAI/pythia-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 
60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-30b,facebook/opt-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-564M,facebook/xglm-564M,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File 
""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: XGLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,TencentARC/Mistral_Pro_8B_v0.1,TencentARC/Mistral_Pro_8B_v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,2452.258816,7298.613248,0.0,6652.166144,6323.221504,s,10,7.6798339843750005,0.7679833984375,0.0025974055850476797,0.7679208068847656,0.7711121887207032,0.7721169158935547,0.772920697631836,"[0.7685753173828125, 0.7731216430664063, 0.7647974853515624, 0.7675167846679688, 0.76627783203125, 0.7641251220703125, 0.7666953735351563, 0.7683248291015625, 0.770888916015625, 0.7695106811523438]",tokens/s,333.34053902837564,kWh,9.039419593792114e-06,4.95321018510627e-06,4.32610068310048e-05,5.725363660990319e-05,tokens/kWh,4471331.694513176,MB,2452.258816,7298.613248,0.0,6652.166144,6382.564864,s,10,455.74574609375,45.57457460937499,0.015546779743152406,45.57488671875,45.595179296874996,45.5973923828125,45.5991628515625,"[45.5867109375, 45.5946875, 45.59960546875, 45.58337109375, 45.55990625, 45.5529375, 45.558640625, 45.56011328125, 45.5776015625, 45.572171875]",tokens/s,1.3823497101175461,kWh,0.000537889561981909,0.0002948106017116671,0.0025152069288305857,0.0033479070925241613,tokens/kWh,18817.726495660016,,s,629,461.93301361083974,0.7343927084433065,0.09125959398126374,0.7233239135742188,0.7243169799804687,0.7247333496093751,1.4901356689453125,"[0.7238604736328125, 0.7241830444335937, 0.7225497436523437, 0.7234365234375, 0.7223142700195313, 0.7223203735351562, 0.7245137939453125, 0.7236771850585938, 0.7226992797851562, 0.723535888671875, 0.7234600830078125, 0.7246929931640625, 0.7229706420898437, 0.7237723999023438, 0.7233392944335938, 0.7235133666992187, 0.7225231323242187, 0.7234631958007812, 0.723367919921875, 0.7226572875976562, 0.7243929443359375, 0.7232921752929687, 0.7236546630859375, 0.7240488891601562, 0.7235286865234375, 0.7245383911132812, 0.7233167114257812, 0.7230576782226562, 0.7237959594726563, 0.7243140869140625, 0.722629638671875, 0.7226122436523438, 0.7224381713867187, 0.7233873901367187, 0.7249028930664062, 0.7240294189453125, 0.723904541015625, 0.7241912231445312, 0.7243069458007813, 0.7245864868164062, 0.7234006958007813, 0.723114990234375, 0.7231324462890625, 0.7226593017578125, 0.7241062622070312, 0.7237662963867187, 0.7239597778320312, 0.724094970703125, 0.7240611572265625, 0.7237017822265625, 
0.7238123779296874, 0.72437451171875, 0.7245363159179687, 0.7248076782226562, 0.7224637451171875, 0.7232000122070312, 0.7239700317382812, 0.7240325317382813, 0.7229603881835938, 0.7228866577148437, 0.7229910888671875, 0.7245496215820313, 1.497416748046875, 0.7238062133789063, 0.7237662963867187, 0.7238911743164063, 0.7240550537109375, 0.7237857055664062, 0.7238358764648437, 0.7228784790039062, 0.72378369140625, 0.7233925170898438, 0.7229235229492188, 0.722534423828125, 0.7236341552734376, 0.723399658203125, 0.7241830444335937, 0.7228897094726563, 0.7232071533203125, 0.7245209350585937, 0.7240325317382813, 0.7224688720703125, 0.7228262329101562, 0.7225323486328125, 0.7236700439453125, 0.7239026489257813, 0.72288037109375, 0.7225128784179687, 0.7245783081054687, 0.7235983276367187, 0.7235532836914063, 0.725017578125, 0.724094970703125, 0.7250708618164062, 0.7244400634765625, 0.7243571166992188, 0.7240745239257812, 0.7232041015625, 0.7236218872070312, 0.7237877807617188, 0.7239588012695313, 0.7241809692382812, 0.7240929565429688, 0.7230279541015625, 0.7229634399414062, 0.7229419555664063, 0.72318359375, 0.7240601806640625, 0.7232184448242187, 0.7226746826171875, 0.7235880737304687, 0.724453369140625, 0.7243407592773438, 0.7238184814453125, 0.7240242919921875, 0.7247308959960937, 0.7247349853515626, 0.7235543212890625, 0.7233402709960938, 0.7242977294921875, 0.7243202514648438, 0.723862548828125, 0.724084716796875, 0.723768310546875, 0.7227473754882813, 1.490913330078125, 0.7229655151367187, 0.7226480712890625, 0.7240775756835938, 0.7235686645507813, 0.7231815795898437, 0.72302490234375, 0.7227381591796875, 0.7236761474609374, 0.724653076171875, 0.724200439453125, 0.7239618530273437, 0.7246776123046875, 0.7250585327148438, 0.7243140869140625, 0.7238225708007813, 0.7248977661132813, 0.725359619140625, 0.7238379516601563, 0.7237171020507812, 0.724158447265625, 0.7239188232421875, 0.7248445434570312, 0.7251189575195313, 0.7247544555664063, 0.7250022583007812, 0.7248936767578125, 0.7236198120117188, 0.7234979858398437, 0.723462158203125, 0.7234559936523437, 0.724263916015625, 0.7227955322265625, 0.7225702514648438, 0.7241932983398438, 0.7234805908203125, 0.7234949340820312, 0.72275146484375, 0.7225385131835937, 0.7228804931640626, 0.7242403564453125, 0.7236218872070312, 0.7236986694335937, 0.723472412109375, 0.724126708984375, 0.7239505615234375, 0.7238901977539063, 0.7237283935546875, 0.7242025146484375, 0.724116455078125, 0.7241031494140625, 0.72374169921875, 0.7228671875, 0.7233228759765625, 0.7237857055664062, 0.7240171508789063, 0.72399462890625, 0.7229276123046875, 0.7241410522460937, 0.7236751098632812, 0.7226521606445313, 0.7233648681640625, 0.7235952758789063, 1.49184814453125, 0.7231047973632813, 0.7224780883789063, 0.7224258422851563, 0.72266650390625, 0.7234283447265625, 0.72353076171875, 0.7227269287109375, 0.7243991088867188, 0.7234119873046875, 0.72285595703125, 0.7229951782226562, 0.7234692993164062, 0.7242373046875, 0.7239915771484375, 0.7242465209960938, 0.7243253784179687, 0.7225200805664063, 0.7226972045898438, 0.7227289428710938, 0.7236956176757813, 0.7238881225585938, 0.7240274047851563, 0.7241492309570312, 0.7236741333007812, 0.7231661987304687, 0.723114990234375, 0.7243571166992188, 0.7238656005859375, 0.723240966796875, 0.7226122436523438, 0.7238615112304687, 0.7242168579101562, 0.7235440673828125, 0.7236239624023437, 0.723926025390625, 0.7243919067382812, 0.7229173583984375, 0.7226542358398438, 0.7227238159179687, 0.7229859619140625, 0.7234990234375, 0.7239567260742188, 
0.7231416015625, 0.7239454956054687, 0.7230084838867188, 0.7233607788085937, 0.7238359375, 0.7240099487304688, 0.7241431274414063, 0.7246141357421875, 0.7241359252929688, 0.7245772705078125, 0.7245137939453125, 0.725411865234375, 0.723578857421875, 0.7241543579101563, 0.7230054321289062, 0.7233526000976562, 0.7226992797851562, 0.7232420043945312, 0.7230075073242187, 0.7230187377929688, 1.490323486328125, 0.7229685668945313, 0.723535888671875, 0.72338330078125, 0.723409912109375, 0.7233341674804687, 0.7232112426757813, 0.72279345703125, 0.7230853271484375, 0.7230105590820313, 0.7226060791015625, 0.723346435546875, 0.7229296875, 0.72382568359375, 0.7241666259765625, 0.72303515625, 0.7230812377929687, 0.7226583251953125, 0.7230535888671875, 0.7229890747070312, 0.7228118896484375, 0.7222753295898438, 0.7228641357421876, 0.722740234375, 0.72254052734375, 0.7228671875, 0.7227391967773438, 0.7233505249023438, 0.722619384765625, 0.72300439453125, 0.7234150390625, 0.7230556030273437, 0.7235297241210937, 0.722819091796875, 0.723610595703125, 0.7228836059570313, 0.7235348510742188, 0.7226911010742187, 0.7227914428710938, 0.7233935546875, 0.7231590576171875, 0.7238717651367188, 0.72331982421875, 0.7227811889648438, 0.7233136596679688, 0.7229542236328125, 0.7231754150390625, 0.7233607788085937, 0.7233925170898438, 0.7229081420898438, 0.7231047973632813, 0.7224483642578124, 0.723252197265625, 0.722640869140625, 0.7238778686523437, 0.7232604370117187, 0.7231641845703125, 0.7244615478515625, 0.7236874389648438, 0.72321533203125, 0.7241400146484375, 0.7235686645507813, 0.7226695556640625, 1.48693603515625, 0.7224832153320313, 0.7234774780273437, 0.7231426391601562, 0.724173828125, 0.7240755004882813, 0.7229542236328125, 0.72241357421875, 0.7228876953125, 0.7233106079101562, 0.7236618041992188, 0.7231641845703125, 0.7225897216796875, 0.7224903564453125, 0.7225599975585938, 0.7225702514648438, 0.7227361450195312, 0.7226112060546875, 0.7231498413085937, 0.723304443359375, 0.722703369140625, 0.7227955322265625, 0.7233474731445313, 0.7234703369140625, 0.7267368774414062, 0.7227781982421875, 0.7225128173828125, 0.7224033203125, 0.7225589599609376, 0.7226634521484375, 0.7223306274414063, 0.7225364990234375, 0.7230811767578125, 0.72255078125, 0.7227003173828125, 0.7224832153320313, 0.7223961791992187, 0.7230310668945312, 0.7257907104492187, 0.7230904541015625, 0.7228538818359375, 0.7229788208007812, 0.7225180053710938, 0.7223818359375, 0.7229481201171875, 0.7229378662109375, 0.72367822265625, 0.7230084838867188, 0.7230146484375, 0.7226326904296875, 0.7231416015625, 0.7237335205078125, 0.7241860961914063, 0.7245127563476562, 0.722572265625, 0.7229020385742188, 0.72300341796875, 0.72266650390625, 0.7228159790039063, 0.7224207153320312, 0.7239270629882812, 0.72285693359375, 0.722966552734375, 1.4896527099609376, 0.7232348022460937, 0.7239567260742188, 0.7227739868164063, 0.723078125, 0.7226429443359375, 0.7226798095703125, 0.7228703002929687, 0.7228917846679688, 0.7229112548828125, 0.722935791015625, 0.7229030151367187, 0.7226050415039063, 0.7226798095703125, 0.7230105590820313, 0.7234058227539063, 0.7233239135742188, 0.7225077514648437, 0.7225938110351563, 0.7227412719726563, 0.7225303344726562, 0.72310888671875, 0.7248046264648438, 0.7248180541992187, 0.723907470703125, 0.72346826171875, 0.7232081909179687, 0.7231743774414062, 0.7237877807617188, 0.7234160766601563, 0.7235860595703125, 0.7227647705078125, 0.7225620727539063, 0.7228334350585938, 0.7226992797851562, 0.7227996215820313, 0.7227381591796875, 
0.7233106079101562, 0.723267578125, 0.7230556030273437, 0.7226695556640625, 0.723061767578125, 0.7235317993164062, 0.7233689575195312, 0.7238010864257812, 0.7234078979492188, 0.7232849731445312, 0.7231876831054688, 0.7232327880859375, 0.7230422973632813, 0.7230341186523438, 0.7235563354492187, 0.7233157348632813, 0.7235266723632813, 0.723040283203125, 0.7234692993164062, 0.7238154296875, 0.7239547119140625, 0.7235758056640625, 0.7227924194335937, 0.7224013061523438, 0.7225753784179687, 0.7224832153320313, 1.4909122314453125, 0.7237437744140625, 0.7234396362304687, 0.7226849365234375, 0.7229102172851563, 0.7227125854492188, 0.7236638793945313, 0.7237918701171875, 0.722882568359375, 0.7225845947265624, 0.7231959228515625, 0.7231826171875, 0.722893798828125, 0.7226316528320312, 0.7233382568359376, 0.7234171142578125, 0.7229818725585937, 0.722428955078125, 0.7228836059570313, 0.722682861328125, 0.7232973022460938, 0.7227125854492188, 0.7229685668945313, 0.723040283203125, 0.7242997436523437, 0.7234037475585937, 0.7232604370117187, 0.7234345092773438, 0.72426904296875, 0.7237816162109375, 0.7232747802734375, 0.7224575805664063, 0.7224043579101562, 0.7228845825195312, 0.7226439819335938, 0.7229317016601563, 0.722651123046875, 0.7223142700195313, 0.7236024169921875, 0.7237283935546875, 0.7242454833984375, 0.72339453125, 0.7232337646484375, 0.7232767944335937, 0.7239905395507813, 0.7230320434570312, 0.7228037109375, 0.7227996215820313, 0.722904052734375, 0.7226705932617188, 0.7241994018554687, 0.7242670288085937, 0.7234703369140625, 0.723314697265625, 0.7229102172851563, 0.722967529296875, 0.7240089721679688, 0.723979248046875, 0.7236167602539062, 0.7229450073242187, 0.7226654663085937, 0.7226122436523438, 0.722820068359375, 1.495736328125, 0.724738037109375, 0.7240570678710937, 0.72363623046875, 0.72394140625, 0.7237017822265625, 0.7253534545898438, 0.7251988525390625, 0.7241973876953125, 0.7238829956054688, 0.7238615112304687, 0.7239464721679687, 0.7240714111328125, 0.7241809692382812, 0.7237929077148437, 0.72289892578125, 0.7232706298828125, 0.722914306640625, 0.7235338134765625, 0.7231948852539063, 0.7239393310546876, 0.7230525512695313, 0.7231815795898437, 0.7229603881835938, 0.7230853271484375, 0.72344677734375, 0.7233054809570313, 0.72283544921875, 0.7226583251953125, 0.7230003051757813, 0.72287744140625, 0.722608154296875, 0.7231324462890625, 0.7233526000976562, 0.7234529418945312, 0.72300439453125, 0.7229020385742188, 0.7233484497070313, 0.723610595703125, 0.7233925170898438, 0.72308837890625, 0.7229317016601563, 0.7229389038085937, 0.7231682739257812, 0.7229910888671875, 0.7232286987304688, 0.72271875, 0.723019775390625, 0.723493896484375, 0.72239208984375, 0.722845703125, 0.7230084838867188, 0.7235686645507813, 0.7236188354492188, 0.7237345581054687, 0.72363623046875, 0.7234805908203125, 0.7234385986328125, 0.7238164672851563, 0.724041748046875, 0.723356689453125, 0.723030029296875, 0.7237140502929688, 1.4925987548828126, 0.7230996704101562, 0.72384716796875, 0.7239147338867188, 0.7256944580078125, 0.7247103881835938, 0.7244021606445312, 0.7243776245117187, 0.724316162109375, 0.7242546997070313, 0.7244994506835938, 0.724832275390625, 0.7239239501953125, 0.723041259765625, 0.7240745239257812, 0.7239096069335937, 0.723515380859375, 0.7226603393554687, 0.7232348022460937, 0.7224627075195312, 0.7226132202148438, 0.7228047485351563, 0.72265625, 0.7226470947265625, 0.7226756591796875, 0.7231713256835938, 0.72317236328125, 0.722787353515625, 0.7223971557617187, 0.7228057861328125, 
0.7233065185546875, 0.7235245971679688, 0.7238748168945313, 0.72374169921875, 0.7234816284179687, 0.7235297241210937, 0.7231426391601562, 0.7237867431640626, 0.7235635375976562, 0.7232808837890625, 0.72345703125, 0.7230924682617188, 0.72296142578125, 0.7232747802734375, 0.7239485473632813, 0.724284423828125, 0.722783203125, 0.7224873046875, 0.7223971557617187, 0.7222200927734375, 0.7224954833984375, 0.7242076416015625, 0.722787353515625, 0.7238604736328125, 0.7227545776367188, 0.7224268798828125, 0.7232020263671874, 0.7240253295898438, 0.722998291015625, 0.7229900512695312, 0.72289892578125, 0.7227340698242187, 0.7230996704101562]",tokens/s,1.36166929287697,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-base-alpha-7b,stabilityai/stablelm-base-alpha-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. 
If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-6B,,cuda,0,42,,,,,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,main,False,False,MB,1571.749888,5448.925184,0.0,4802.47808,4489.12128,s,10,5.0903373718261715,0.5090337371826171,0.0024963690031113454,0.5090882415771485,0.5114007537841796,0.5133009628295898,0.514821130065918,"[0.515201171875, 0.5058653564453125, 0.5075380249023438, 0.5109784851074218, 0.5074734191894531, 0.5066469421386719, 0.5090344543457032, 0.5092752685546875, 0.5091822204589844, 0.5091420288085937]",tokens/s,502.91362104386286,kWh,5.98198832737075e-06,3.277856253707796e-06,2.697877158300277e-05,3.623861616408132e-05,tokens/kWh,7064287.412104325,MB,1571.749888,5448.925184,0.0,4802.47808,4557.793792,s,10,299.8803828125,29.98803828125,0.03808075694133287,29.9707392578125,30.046216015625,30.058939062500002,30.0691175,"[29.972552734375, 30.043388671875, 30.071662109375, 30.010330078125, 29.96, 29.95865625, 29.96925, 29.972228515625, 29.963451171875, 29.95886328125]",tokens/s,2.1008376543053404,kWh,0.00035374844759702685,0.00019388459414373753,0.001556149911585597,0.0021037829533263617,tokens/kWh,29946.05498651303,,s,629,303.97862612915014,0.48327285553124066,0.06045359810307915,0.47556915283203127,0.47775867309570313,0.47801220703124997,0.9833881201171876,"[0.4750715026855469, 0.4762265625, 0.4773417053222656, 0.47625729370117187, 0.47571148681640624, 0.47557223510742186, 0.47629312133789065, 0.47471615600585937, 0.475789306640625, 0.47461376953125, 0.4746967163085937, 0.474913818359375, 0.47592657470703126, 0.47551071166992187, 0.47595623779296875, 0.47574118041992186, 0.4750817260742187, 0.47586611938476564, 0.4771154479980469, 0.47594082641601565, 0.4757462768554688, 0.47569818115234375, 0.4764293212890625, 0.475978759765625, 0.47632794189453126, 0.47545343017578123, 0.47498138427734377, 0.47522305297851564, 0.47550360107421874, 0.47533575439453124, 0.4750652770996094, 0.4751790161132812, 0.47525888061523436, 0.4754544677734375, 0.4752803955078125, 0.4756971435546875, 0.4763504638671875, 0.4758845520019531, 0.47809332275390626, 0.475462646484375, 0.47741644287109375, 0.47680307006835937, 0.47630642700195314, 0.47653070068359377, 0.47590911865234375, 0.4751380615234375, 0.47556710815429687, 0.475146240234375, 0.47564901733398435, 0.4757647399902344, 0.47530291748046877, 0.4750807189941406, 0.47518719482421873, 0.47629925537109374, 0.4754646911621094, 0.4754155578613281, 0.4753121337890625, 0.4750469055175781, 0.47505612182617185, 0.4759122009277344, 0.47506842041015623, 0.4758763427734375, 0.9829324951171875, 0.4772618103027344, 0.47679385375976563, 0.4752302551269531, 0.47515850830078127, 
0.4750141296386719, 0.4753879089355469, 0.47504486083984376, 0.47488204956054686, 0.47477862548828126, 0.4748114013671875, 0.47600946044921877, 0.4750940246582031, 0.4750837707519531, 0.4750745544433594, 0.4750592041015625, 0.47484109497070315, 0.4761292724609375, 0.4751933288574219, 0.4750182495117187, 0.475357177734375, 0.47510528564453125, 0.4762142639160156, 0.47762432861328125, 0.4771512451171875, 0.478097412109375, 0.47743179321289064, 0.4781097106933594, 0.4774246520996094, 0.47769497680664064, 0.4778270568847656, 0.4778741760253906, 0.47795404052734375, 0.47758233642578124, 0.47754238891601564, 0.47758438110351564, 0.4775854187011719, 0.4776581115722656, 0.4781363220214844, 0.47784140014648435, 0.4785827941894531, 0.4778670043945312, 0.478013427734375, 0.4813271179199219, 0.4787435607910156, 0.4778823547363281, 0.4775577697753906, 0.47766937255859376, 0.4776509399414062, 0.47802163696289063, 0.4775925903320313, 0.4771829833984375, 0.47707955932617185, 0.4779130859375, 0.47752191162109375, 0.4774225769042969, 0.47764581298828124, 0.47809228515625, 0.47725054931640626, 0.47956683349609375, 0.47532647705078124, 0.47634738159179685, 0.4757237854003906, 0.9846497192382813, 0.4779151306152344, 0.47775845336914063, 0.4778516540527344, 0.4773468017578125, 0.47776461791992186, 0.47770932006835937, 0.4769669189453125, 0.4777676696777344, 0.4777635803222656, 0.47618765258789064, 0.4749752197265625, 0.4755538024902344, 0.4747980651855469, 0.4747796630859375, 0.4748114013671875, 0.47680514526367185, 0.4753622741699219, 0.47532440185546876, 0.4750438537597656, 0.4750325622558594, 0.47742669677734373, 0.4790394897460937, 0.47991500854492186, 0.4776028137207031, 0.4770672607421875, 0.47781991577148436, 0.4773304443359375, 0.4778946533203125, 0.4778496398925781, 0.4774788818359375, 0.4779632568359375, 0.47771136474609377, 0.47775640869140623, 0.47768267822265625, 0.47796121215820314, 0.4775454711914062, 0.4780103759765625, 0.4779438171386719, 0.4773253173828125, 0.4780001220703125, 0.47749530029296877, 0.478482421875, 0.4767160339355469, 0.47870156860351565, 0.47761203002929686, 0.47860427856445314, 0.47783221435546874, 0.4779366149902344, 0.4776212463378906, 0.4775301208496094, 0.477328369140625, 0.478376953125, 0.4772362365722656, 0.47751168823242185, 0.47775955200195314, 0.47782699584960936, 0.47732632446289064, 0.47806362915039063, 0.4772454528808594, 0.4775782775878906, 0.47722698974609373, 0.477765625, 0.9892290649414063, 0.47758950805664063, 0.47766937255859376, 0.47711026000976564, 0.4785479736328125, 0.4779674072265625, 0.47848751831054687, 0.4773918762207031, 0.47714407348632815, 0.475968505859375, 0.47733248901367187, 0.4777830505371094, 0.4774912109375, 0.4774410095214844, 0.4776990661621094, 0.47711026000976564, 0.47647540283203127, 0.4762900390625, 0.47571044921875, 0.47531622314453126, 0.47745126342773436, 0.4778526611328125, 0.4754595947265625, 0.4753295593261719, 0.47519024658203124, 0.47728536987304687, 0.4776847229003906, 0.47796734619140624, 0.47679693603515627, 0.4769525756835937, 0.47623678588867185, 0.47598489379882813, 0.47588760375976563, 0.47573504638671876, 0.4752127990722656, 0.47496600341796874, 0.4763709411621094, 0.47549542236328124, 0.4751493225097656, 0.4747591552734375, 0.47531109619140627, 0.47625933837890627, 0.47604122924804687, 0.4752691345214844, 0.478866455078125, 0.4751441650390625, 0.4752322692871094, 0.47499981689453125, 0.47643954467773436, 0.4755630187988281, 0.4757166137695312, 0.47565005493164064, 0.476015625, 0.4754155578613281, 0.4754124755859375, 
0.47525070190429686, 0.47504281616210936, 0.4759347229003906, 0.47554763793945315, 0.47518923950195313, 0.47525274658203126, 0.475404296875, 0.47679489135742187, 0.9835653076171875, 0.4750100708007812, 0.47497518920898435, 0.47521792602539065, 0.47529168701171876, 0.47549435424804687, 0.47515341186523435, 0.47609548950195313, 0.47539712524414063, 0.47541351318359376, 0.47511859130859374, 0.47484622192382814, 0.47537664794921874, 0.475104248046875, 0.47634738159179685, 0.4752896118164063, 0.47548724365234374, 0.4761077880859375, 0.4751452026367188, 0.4771768188476562, 0.4755333251953125, 0.4755230712890625, 0.4750796813964844, 0.4751923217773438, 0.4752496643066406, 0.4748308410644531, 0.47501516723632814, 0.47498751831054686, 0.4759531555175781, 0.4754883117675781, 0.47538067626953123, 0.4753909912109375, 0.47539712524414063, 0.4753735656738281, 0.475494384765625, 0.47589376831054686, 0.4761006164550781, 0.47518206787109374, 0.4750602111816406, 0.47489434814453124, 0.47634228515625, 0.47604940795898437, 0.47567974853515627, 0.47519744873046876, 0.4751749267578125, 0.47551077270507813, 0.4754002685546875, 0.47505914306640623, 0.47493426513671877, 0.475788330078125, 0.4752465515136719, 0.47501516723632814, 0.47556915283203127, 0.4761507873535156, 0.47565823364257814, 0.47601458740234376, 0.47671194458007815, 0.478308349609375, 0.4755906677246094, 0.47542578125, 0.4754708557128906, 0.4771061706542969, 0.4754810791015625, 0.9827368774414063, 0.47548416137695315, 0.4752916564941406, 0.4750274658203125, 0.4750274658203125, 0.47477658081054686, 0.4755199890136719, 0.47510833740234376, 0.4752332763671875, 0.47511962890625, 0.4750837707519531, 0.47506329345703124, 0.4751718444824219, 0.477048828125, 0.4756756591796875, 0.475315185546875, 0.47508685302734377, 0.474925048828125, 0.476084228515625, 0.4754022521972656, 0.47531417846679686, 0.47490765380859373, 0.47505612182617185, 0.4752547912597656, 0.47493939208984376, 0.4751216735839844, 0.4752363586425781, 0.47553741455078125, 0.47550872802734373, 0.4752025451660156, 0.47503768920898437, 0.47521588134765624, 0.47512063598632814, 0.4751523742675781, 0.4767621154785156, 0.4764487609863281, 0.47532235717773436, 0.4752414855957031, 0.47862374877929686, 0.47638223266601565, 0.47562542724609375, 0.47544525146484373, 0.47627877807617186, 0.47506329345703124, 0.47538995361328124, 0.4754227294921875, 0.47551693725585936, 0.4754288635253906, 0.47596337890625, 0.4758896789550781, 0.4756899719238281, 0.4755988464355469, 0.4755916748046875, 0.4752066650390625, 0.4755640258789062, 0.47575653076171875, 0.47548416137695315, 0.4752998962402344, 0.4755332336425781, 0.4752363586425781, 0.4765552673339844, 0.4758056945800781, 0.47643853759765625, 0.9858508911132813, 0.4760975341796875, 0.4763627624511719, 0.47618048095703125, 0.47583026123046873, 0.4758814697265625, 0.47627365112304687, 0.47550360107421874, 0.4748226623535156, 0.4754380798339844, 0.4754176025390625, 0.47634841918945314, 0.47666278076171875, 0.4759449462890625, 0.47538177490234373, 0.475430908203125, 0.4754606018066406, 0.4765736999511719, 0.47562957763671876, 0.47619277954101563, 0.4762623901367187, 0.47586407470703124, 0.4763156433105469, 0.47800421142578126, 0.47563058471679687, 0.47566949462890623, 0.4764661865234375, 0.47568487548828126, 0.4763525085449219, 0.47546881103515626, 0.4754503784179688, 0.47535821533203126, 0.4751155090332031, 0.476685302734375, 0.4755333251953125, 0.4750540771484375, 0.4750325622558594, 0.4753387451171875, 0.47632281494140627, 0.47543603515625, 0.47514727783203126, 
0.47536947631835935, 0.4750110778808594, 0.4750960693359375, 0.47505816650390625, 0.47526605224609375, 0.47514215087890627, 0.47597158813476564, 0.47550054931640623, 0.47509503173828127, 0.47519845581054687, 0.4751452026367188, 0.4753704833984375, 0.474967041015625, 0.47536639404296877, 0.47516876220703125, 0.47490457153320315, 0.4751523742675781, 0.47530905151367187, 0.4764610595703125, 0.47634228515625, 0.47607601928710935, 0.4758845520019531, 0.9857208251953125, 0.4755548095703125, 0.47583026123046873, 0.4755302734375, 0.4761640625, 0.4756316223144531, 0.475904052734375, 0.4751758728027344, 0.47539813232421874, 0.475536376953125, 0.4755210266113281, 0.475514892578125, 0.4755937805175781, 0.47548818969726564, 0.47541656494140627, 0.47544216918945315, 0.477454345703125, 0.4756387939453125, 0.4758138732910156, 0.47603302001953124, 0.47580465698242186, 0.47644158935546876, 0.4751994934082031, 0.47532339477539065, 0.4750745544433594, 0.47546981811523437, 0.4754565124511719, 0.47503768920898437, 0.47527935791015624, 0.4750858154296875, 0.4753018798828125, 0.4747683715820312, 0.4749916076660156, 0.47602789306640625, 0.4755906677246094, 0.4751790161132812, 0.47570123291015626, 0.477876220703125, 0.477338623046875, 0.4760637512207031, 0.4767999877929687, 0.47610983276367186, 0.47703347778320315, 0.47675802612304685, 0.47620712280273436, 0.4752158508300781, 0.47609036254882814, 0.4771328125, 0.4762552185058594, 0.4760504455566406, 0.47554458618164064, 0.475283447265625, 0.47543603515625, 0.4751523742675781, 0.475335693359375, 0.4752138366699219, 0.47528140258789064, 0.475109375, 0.47681332397460935, 0.47562240600585937, 0.47532339477539065, 0.47533465576171874, 0.47510833740234376, 0.984848388671875, 0.4750960693359375, 0.47506024169921873, 0.4759930419921875, 0.47555789184570313, 0.4750254211425781, 0.47533978271484373, 0.4758394775390625, 0.475610107421875, 0.47590911865234375, 0.4757596130371094, 0.4766791687011719, 0.4766371765136719, 0.4757176208496094, 0.4764549255371094, 0.4773990478515625, 0.4758917236328125, 0.476790771484375, 0.47595416259765627, 0.4752404479980469, 0.47515545654296876, 0.475030517578125, 0.4752916564941406, 0.4751278076171875, 0.4755138549804688, 0.47549234008789065, 0.47554150390625, 0.4758425598144531, 0.47556607055664063, 0.4752015380859375, 0.47505612182617185, 0.4750540771484375, 0.47509811401367186, 0.4751697998046875, 0.4750817260742187, 0.4752066650390625, 0.4769587097167969, 0.4757596130371094, 0.47552410888671875, 0.4753387451171875, 0.4751247253417969, 0.47600946044921877, 0.47491787719726564, 0.47531417846679686, 0.47513394165039063, 0.4757739562988281, 0.4750469055175781, 0.475030517578125, 0.4758507385253906, 0.4755599365234375, 0.47600741577148437, 0.4752414855957031, 0.4751247253417969, 0.47610470581054687, 0.4753530883789063, 0.475404296875, 0.4752005004882813, 0.476626953125, 0.4756654052734375, 0.47670578002929687, 0.4761466979980469, 0.4752547912597656, 0.4752209777832031, 0.9850972290039063, 0.47500698852539064, 0.47586099243164065, 0.475536376953125, 0.4751933288574219, 0.4753070068359375, 0.47523434448242186, 0.475157470703125, 0.4753039245605469, 0.4752762756347656, 0.4760606689453125, 0.47565216064453125, 0.4750796203613281, 0.47518106079101563, 0.4763607177734375, 0.47558758544921875, 0.47543499755859375, 0.47558349609375, 0.4752384033203125, 0.47536639404296877, 0.47527423095703125, 0.47547494506835936, 0.4750335998535156, 0.4753049621582031, 0.47516571044921874, 0.4754155578613281, 0.4772812805175781, 0.4751769714355469, 0.4758814697265625, 
0.4761343994140625, 0.4757074279785156, 0.4766033630371094, 0.47504281616210936, 0.47508480834960937, 0.4752629699707031, 0.47697714233398436, 0.4764979248046875, 0.4756357421875, 0.4753837890625, 0.4751473083496094, 0.475273193359375, 0.475177978515625, 0.47527835083007813, 0.4753541259765625, 0.4753623046875, 0.4753930358886719, 0.4750796813964844, 0.47559576416015625, 0.47536334228515625, 0.4764241943359375, 0.47548724365234374, 0.47530087280273436, 0.47603302001953124, 0.4752906188964844, 0.47511962890625, 0.47501516723632814, 0.4768563232421875, 0.4758026123046875, 0.47588864135742187, 0.4753950805664062, 0.4752138366699219, 0.47535000610351563, 0.4750120849609375]",tokens/s,2.069224432025556,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-14B,Qwen/Qwen1.5-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,3984.965632,12732.33408,0.0,12085.886976,11337.364992,s,10,10.92237109375,1.092237109375,0.0021117743883837157,1.0918812255859374,1.0945805541992188,1.0956890197753906,1.0965757922363282,"[1.0967974853515625, 1.094334228515625, 1.0893026123046874, 1.090765625, 1.0907347412109376, 1.09039990234375, 1.0912408447265625, 1.09275048828125, 1.0935235595703126, 1.0925216064453125]",tokens/s,234.38134247836382,kWh,1.2875690162181854e-05,7.054504655134223e-06,6.32269116926043e-05,8.315710650992037e-05,tokens/kWh,3078510.1928655975,MB,3984.965632,12732.33408,0.0,12085.886976,11686.79936,s,10,637.5050507812499,63.75050507812499,0.0049395574759148195,63.749460937500004,63.757564453125,63.759594726562504,63.7612189453125,"[63.74835546875, 63.7478046875, 63.75041796875, 63.7437265625, 63.75711328125, 63.74664453125, 63.761625, 63.75044140625, 63.74853515625, 63.75038671875]",tokens/s,0.9882274645949038,kWh,0.0007525312550531495,0.00041245463154615205,0.0037184816692274027,0.004883467555826704,tokens/kWh,12900.669305119396,,s,629,646.3186791381836,1.027533671125888,0.12973576665422035,1.0118389892578126,1.012325793457031,1.0126516357421875,2.103177431640625,"[1.0117550048828126, 1.0119987182617187, 1.0120816650390625, 1.0120765991210938, 1.0125403442382812, 1.011704833984375, 1.0115768432617187, 1.0117868041992188, 1.01175390625, 1.0114559936523437, 1.0114959106445311, 1.0115798950195312, 1.0117611694335937, 1.01186865234375, 1.0123243408203124, 1.0116085815429687, 1.01121435546875, 1.0115245971679687, 1.0116761474609375, 1.011673095703125, 1.0115952758789062, 1.0115348510742188, 1.011820556640625, 1.0118010864257811, 1.0121104125976563, 1.0117160034179689, 1.0116536254882813, 1.0114908447265625, 1.0122445068359376, 1.0119515991210937, 1.0117140502929687, 1.011957763671875, 1.0116392822265625, 1.0119588012695313, 1.0129981689453125, 1.011984375, 1.0124021606445313, 1.01186865234375, 1.0122096557617188, 1.0120274047851563, 1.0117069091796875, 1.0121103515625, 1.0121615600585938, 1.0125332641601563, 1.0121226196289062, 1.01194140625, 1.0114242553710937, 1.0118287353515625, 1.011631103515625, 1.0119935913085938, 1.0117672729492186, 1.0120878295898437, 
1.0116167602539063, 1.0122476196289063, 1.0120057983398438, 1.0121021728515625, 1.011689453125, 1.0124891967773437, 1.0117969970703125, 1.0116424560546875, 1.0114559326171875, 1.0116290283203124, 2.106623046875, 1.0121912841796874, 1.012216796875, 1.0120878295898437, 1.01224755859375, 1.0123724365234374, 1.0121103515625, 1.011904541015625, 1.0120908813476563, 1.0119536743164061, 1.0122240600585937, 1.0117017211914063, 1.0118287353515625, 1.0117017822265626, 1.0116792602539062, 1.0114119873046874, 1.0114088745117187, 1.01127783203125, 1.011852294921875, 1.01174169921875, 1.011968994140625, 1.0117713623046876, 1.0116188354492188, 1.0120038452148437, 1.011915771484375, 1.0112901000976562, 1.0112747802734374, 1.0113668823242188, 1.01144677734375, 1.0113177490234375, 1.0119198608398436, 1.0120233154296876, 1.0116013793945313, 1.0115082397460937, 1.0116761474609375, 1.0115245971679687, 1.0114263305664062, 1.0112634887695313, 1.011557373046875, 1.0114109497070312, 1.0121984252929688, 1.0121513061523437, 1.01176318359375, 1.0114232177734375, 1.011726318359375, 1.011304443359375, 1.0149703979492188, 1.0115143432617189, 1.012031494140625, 1.0119803466796875, 1.0133125, 1.0118717651367188, 1.0115481567382814, 1.0116792602539062, 1.0120653076171875, 1.0115502319335938, 1.0118072509765625, 1.0117089233398437, 1.0117550048828126, 1.0120355834960937, 1.0122034912109374, 1.011757080078125, 1.0119905395507813, 2.102846435546875, 1.0113546142578125, 1.0115645141601564, 1.011766357421875, 1.0118368530273438, 1.011746826171875, 1.0118656005859374, 1.0115020751953125, 1.0114826049804688, 1.0119618530273438, 1.01157373046875, 1.0117109985351562, 1.0118194580078126, 1.0120345458984374, 1.0119188232421874, 1.0118768920898438, 1.0121174926757812, 1.0112481079101563, 1.011708984375, 1.011843017578125, 1.011999755859375, 1.0120202026367187, 1.0120222778320314, 1.0137774047851562, 1.011525634765625, 1.0120479125976563, 1.011984375, 1.0118092651367188, 1.0117805786132812, 1.0120990600585937, 1.0119854125976562, 1.01212158203125, 1.0122670288085938, 1.0120601806640626, 1.0118174438476562, 1.0118410034179688, 1.0115552978515625, 1.011240966796875, 1.011661865234375, 1.0114876708984375, 1.011441650390625, 1.01127783203125, 1.0116198120117188, 1.0113597412109374, 1.0116414184570313, 1.0120242309570313, 1.0122291259765626, 1.013486572265625, 1.0116536254882813, 1.0121564331054687, 1.0123980712890626, 1.0119321899414062, 1.012010009765625, 1.0122987670898438, 1.0121973876953125, 1.0126295166015624, 1.0126663818359376, 1.0119352416992187, 1.0116690063476563, 1.0120150756835937, 1.0120540771484374, 1.01182666015625, 1.0116351928710938, 2.10330615234375, 1.0118348999023437, 1.0117386474609376, 1.0117161254882812, 1.01167822265625, 1.0121646118164063, 1.0119137573242187, 1.01201611328125, 1.0119761962890625, 1.0122608032226563, 1.0121830444335937, 1.0119669799804687, 1.0134896850585937, 1.0120386352539064, 1.0123622436523438, 1.0113034057617187, 1.0113535766601562, 1.011219482421875, 1.0112798461914063, 1.0111006469726562, 1.0113710327148437, 1.0122311401367188, 1.0115225830078125, 1.0115430297851562, 1.0117457885742187, 1.0114498291015626, 1.0119761962890625, 1.0120970458984375, 1.0119505615234374, 1.0119619140625, 1.0121400146484374, 1.0117672729492186, 1.0114600830078124, 1.0114866943359375, 1.011557373046875, 1.011900390625, 1.0118594360351563, 1.01203662109375, 1.0116956176757812, 1.0115460815429687, 1.0117755126953125, 1.0116608276367187, 1.01161474609375, 1.0111918334960937, 1.0146948852539062, 1.01151953125, 
1.011521484375, 1.0112010498046875, 1.01165771484375, 1.0114376220703125, 1.01186865234375, 1.0118800048828125, 1.0115624389648437, 1.0114273071289062, 1.011641357421875, 1.0113966064453126, 1.0116741333007813, 1.011451904296875, 1.0115655517578126, 1.011557373046875, 1.012046875, 1.0120038452148437, 1.011968017578125, 2.10362060546875, 1.0117232666015625, 1.0121318359375, 1.0118696899414064, 1.0120601806640626, 1.0119393310546876, 1.012031494140625, 1.0123212890625, 1.0119556884765626, 1.01214208984375, 1.0124891967773437, 1.0119710693359374, 1.0119178466796874, 1.0113935546875, 1.0116761474609375, 1.0115359497070313, 1.011611572265625, 1.01361767578125, 1.0116761474609375, 1.0114703369140625, 1.0116137084960937, 1.011589111328125, 1.0115932006835937, 1.0114774780273437, 1.0115348510742188, 1.0114713745117188, 1.0118184814453124, 1.0116055297851563, 1.01148876953125, 1.0116792602539062, 1.0118379516601563, 1.0119649047851562, 1.0117376708984376, 1.011568603515625, 1.0120294189453125, 1.01218505859375, 1.0119823608398437, 1.0122332153320313, 1.011926025390625, 1.0118748168945313, 1.0124400634765625, 1.01216357421875, 1.011631103515625, 1.0115266723632812, 1.0125834350585938, 1.0121605224609376, 1.0119229736328126, 1.0119270629882813, 1.01148876953125, 1.0118287353515625, 1.011999755859375, 1.011862548828125, 1.0116915283203125, 1.0118143920898437, 1.011768310546875, 1.0122393798828124, 1.0125045776367188, 1.0162032470703124, 1.0122127075195313, 1.0123765869140624, 1.0122915649414062, 1.012389892578125, 1.0124400634765625, 2.102681640625, 1.0123212280273437, 1.0120990600585937, 1.0122465209960938, 1.0117007446289064, 1.012094970703125, 1.0112553100585937, 1.0112625122070313, 1.0114201049804687, 1.0113854370117188, 1.01134228515625, 1.0119854125976562, 1.0117590942382813, 1.0113126220703126, 1.0112696533203125, 1.011483642578125, 1.0116823120117187, 1.0114539794921875, 1.0115082397460937, 1.0114959106445311, 1.0115481567382814, 1.0120242919921876, 1.0115635375976562, 1.0115850219726563, 1.0117254028320313, 1.0121840209960937, 1.0118276977539062, 1.0116751098632812, 1.0118184814453124, 1.0114385986328125, 1.0118215942382813, 1.0118092651367188, 1.0117406616210938, 1.011473388671875, 1.0117386474609376, 1.0118164672851562, 1.01148876953125, 1.0114867553710938, 1.0149037475585938, 1.0118359375, 1.0122781982421876, 1.0117642211914062, 1.0119249877929688, 1.011294189453125, 1.0117027587890626, 1.0116690063476563, 1.0123939819335936, 1.0120908813476563, 1.0122567749023437, 1.0120653076171875, 1.0122485961914063, 1.012369384765625, 1.012421630859375, 1.0119987182617187, 1.0126663818359376, 1.0115481567382814, 1.0117089233398437, 1.0119823608398437, 1.0117867431640626, 1.0115614624023437, 1.0122546997070312, 1.0117652587890624, 1.012073486328125, 2.104281005859375, 1.011789794921875, 1.0118133544921875, 1.0122199096679687, 1.01174169921875, 1.0117130126953124, 1.0126079711914062, 1.0127093505859375, 1.0125383911132813, 1.0122393798828124, 1.01216357421875, 1.0123182373046875, 1.0122199096679687, 1.0121246948242189, 1.0119669799804687, 1.0121113891601563, 1.0128097534179688, 1.01222705078125, 1.012335693359375, 1.0115757446289062, 1.0118441162109375, 1.0118328247070312, 1.011631103515625, 1.0136719360351563, 1.0121174926757812, 1.01250048828125, 1.012552734375, 1.0127390747070313, 1.0122342529296875, 1.0118225708007813, 1.0121768798828126, 1.0121298217773438, 1.0121860961914062, 1.0118389892578126, 1.012316162109375, 1.0120714111328124, 1.0119198608398436, 1.0119669799804687, 1.0115543823242188, 
1.0116760864257812, 1.0120601806640626, 1.0119342041015624, 1.0120653076171875, 1.0115552978515625, 1.0121380004882812, 1.0123212890625, 1.0120806274414063, 1.0117672729492186, 1.0117805786132812, 1.0118287353515625, 1.0118615112304687, 1.0117703857421876, 1.0114365234375, 1.0116065063476563, 1.0118512573242187, 1.0121676635742187, 1.0124697875976563, 1.0122608642578126, 1.0120376586914062, 1.0119403686523438, 1.0120181884765624, 1.0120386352539064, 1.01167822265625, 2.1052109375, 1.0120621948242188, 1.01188916015625, 1.0118225708007813, 1.01174169921875, 1.012010009765625, 1.0117171020507814, 1.0117847290039061, 1.0114641723632813, 1.01161474609375, 1.0114928588867187, 1.0118482055664062, 1.0115112915039062, 1.0117078857421875, 1.0117007446289064, 1.011862548828125, 1.0131630249023438, 1.0117345581054686, 1.0117489013671874, 1.0116443481445312, 1.0115972900390624, 1.0121307983398438, 1.01167822265625, 1.0120202026367187, 1.0114754638671875, 1.012262939453125, 1.0119618530273438, 1.0120448608398438, 1.01155224609375, 1.0120181884765624, 1.0120068969726563, 1.0124298095703126, 1.0118533325195314, 1.0121185302734375, 1.0118779296875, 1.0119566650390626, 1.0118379516601563, 1.0119434204101563, 1.0118062133789063, 1.0120775756835938, 1.0119024658203124, 1.0125721435546875, 1.0118563842773438, 1.0118348999023437, 1.0117273559570312, 1.01222705078125, 1.011905517578125, 1.0115379028320313, 1.0121062622070311, 1.0116116333007812, 1.011979248046875, 1.012168701171875, 1.0120171508789062, 1.01224658203125, 1.0122731323242187, 1.0115819091796876, 1.0118994140625, 1.0117642211914062, 1.0116351928710938, 1.011962890625, 1.0119782104492188, 1.01182568359375, 1.012173828125, 2.105998291015625, 1.0114345092773438, 1.0115174560546876, 1.0114590454101562, 1.0117990112304687, 1.011937255859375, 1.0119331665039062, 1.011800048828125, 1.0117058715820313, 1.0113648681640626, 1.0117181396484376, 1.0116608276367187, 1.0114611206054687, 1.0114600830078124, 1.0114508666992188, 1.0114754638671875, 1.0120274047851563, 1.0133237915039062, 1.0123274536132814, 1.0115994262695311, 1.0119392700195313, 1.011694580078125, 1.0119700317382812, 1.0124237060546875, 1.0117488403320312, 1.0116751098632812, 1.0119721069335939, 1.011651611328125, 1.01169970703125, 1.0113812255859376, 1.0121625366210938, 1.0115175170898438, 1.0117620849609374, 1.011493896484375, 1.0118062133789063, 1.0119086303710938, 1.01222607421875, 1.0115870971679688, 1.0119854125976562, 1.0115471801757812, 1.0122720336914062, 1.0117857055664063, 1.0124462280273439, 1.0115880737304688, 1.0122782592773438, 1.0121298217773438, 1.0129663696289062, 1.011736572265625, 1.0120570678710938, 1.0118963623046875, 1.0122628784179688, 1.0121093139648438, 1.0120888061523436, 1.011789794921875, 1.0123253784179687, 1.0119198608398436, 1.0131046142578124, 1.0120990600585937, 1.0116629028320312, 1.0112337646484375, 1.0118236083984375, 1.0113085327148437, 1.0116392822265625, 2.104349609375, 1.0117294311523437, 1.0120775756835938, 1.0116546630859375, 1.01159423828125, 1.0118696899414064, 1.0115491943359376, 1.0113505249023438, 1.0112051391601562, 1.0120068969726563, 1.0118911743164063, 1.011857421875, 1.01180419921875, 1.0114385375976562, 1.011646484375, 1.011788818359375, 1.0118615112304687, 1.0118656005859374, 1.0119014282226562, 1.0118348999023437, 1.011794921875, 1.011900390625, 1.0119556884765626, 1.0122608642578126, 1.0124257202148437, 1.0121984252929688, 1.0144102172851563, 1.011820556640625, 1.0118195190429689, 1.0117386474609376, 1.0117069091796875, 1.0122393798828124, 
1.011646484375, 1.0119147338867187, 1.011462158203125, 1.012073486328125, 1.0115911865234375, 1.011684326171875, 1.0119321899414062, 1.0118062133789063, 1.0121994018554688, 1.0123673706054688, 1.0117744750976563, 1.011873779296875, 1.0116044921875, 1.011989501953125, 1.0118911743164063, 1.01174169921875, 1.011662841796875, 1.0115194702148438, 1.0117089233398437, 1.0126878662109375, 1.0129653930664062, 1.0122413940429686, 1.0118389892578126, 1.0119505615234374, 1.0117611694335937, 1.01174169921875, 1.0118338623046874, 1.0117755126953125, 1.0121144409179688, 1.0123131103515626, 1.0117294311523437]",tokens/s,0.973204117879934,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm2-20b,internlm/internlm2-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.217-205.860.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,,,,,MB,3088.633856,14093.385728,0.0,13446.938624,13175.804928,s,10,15.872446411132811,1.5872446411132812,0.001977784195727725,1.5869342651367186,1.5899507080078126,1.5901222045898438,1.5902594018554688,"[1.58719189453125, 1.584764892578125, 1.5848123779296874, 1.5860927734375, 1.5866766357421875, 1.5853348388671875, 1.590293701171875, 1.58790966796875, 1.58991259765625, 1.58945703125]",tokens/s,161.28578630477753,kWh,1.8717543135086694e-05,1.0257253674026287e-05,9.112487845540085e-05,0.00012009967526451382,tokens/kWh,2131562.7992846123,MB,3091.492864,14093.385728,0.0,13446.938624,13300.586496,s,10,928.4395625000001,92.84395624999999,0.007570288473903254,92.84323828125,92.8531703125,92.8533390625,92.8534740625,"[92.8279765625, 92.8535078125, 92.8414140625, 92.8458828125, 92.8531328125, 92.853015625, 92.843109375, 92.8433671875, 92.8399140625, 92.8382421875]",tokens/s,0.6785579002079632,kWh,0.0010960241793427203,0.00060071625912522,0.005404193295573401,0.007100933734041341,tokens/kWh,8872.072654048685,,s,629,941.2515487060549,1.4964253556535052,0.1885841806933767,1.4736722412109375,1.4742849609375,1.4745136962890626,3.0601340332031253,"[1.4728734130859376, 1.473666015625, 1.473407958984375, 1.47373974609375, 1.4734244384765625, 1.47390771484375, 1.47325244140625, 1.4737960205078124, 1.4734847412109375, 1.4734468994140626, 1.473364013671875, 1.47363330078125, 1.4732943115234376, 1.47340087890625, 1.4733291015625, 1.47338134765625, 1.473206298828125, 1.473244140625, 1.4738125, 1.473511474609375, 1.4734613037109374, 1.4737889404296876, 1.473449951171875, 1.473580078125, 1.473765380859375, 1.473459228515625, 1.473560546875, 1.47323388671875, 1.474029541015625, 1.4734468994140626, 1.473263671875, 1.4734326171875, 1.47308642578125, 1.4729144287109375, 1.4731171875, 1.47327490234375, 1.473290283203125, 1.4730946044921875, 1.4730894775390626, 1.4733568115234374, 1.4730352783203124, 1.473344482421875, 1.4731263427734376, 1.4731683349609375, 1.4734151611328126, 1.4735421142578125, 1.473369140625, 1.4731141357421875, 1.4733404541015624, 1.47325439453125, 1.4731715087890624, 1.47344384765625, 1.4731990966796875, 1.473396728515625, 1.473218505859375, 1.4735196533203125, 1.4733302001953126, 1.47679638671875, 1.473838134765625, 
1.4735543212890625, 1.4735584716796875, 1.473474609375, 3.058888671875, 1.4736353759765626, 1.4734478759765626, 1.47342333984375, 1.4732247314453124, 1.4741309814453125, 1.4735350341796876, 1.4742303466796876, 1.474051025390625, 1.4743828125, 1.4741749267578126, 1.4742200927734375, 1.4739588623046875, 1.4743316650390625, 1.4739302978515625, 1.4737408447265625, 1.474087890625, 1.47405517578125, 1.4743592529296874, 1.4743214111328125, 1.474566162109375, 1.473896484375, 1.47449853515625, 1.4739251708984376, 1.47448828125, 1.473300537109375, 1.4734755859375, 1.4742425537109376, 1.4745487060546876, 1.4744595947265624, 1.47428662109375, 1.4733526611328125, 1.4731519775390625, 1.47367529296875, 1.4740152587890625, 1.4736251220703125, 1.473764404296875, 1.4738944091796875, 1.4733157958984375, 1.473659912109375, 1.4736898193359376, 1.47346826171875, 1.4733157958984375, 1.4738472900390625, 1.4736722412109375, 1.4738759765625, 1.4739302978515625, 1.473797119140625, 1.4737530517578126, 1.47359130859375, 1.4737294921875, 1.4736865234375, 1.4733988037109376, 1.47375, 1.473734619140625, 1.4735994873046876, 1.4738953857421875, 1.47376123046875, 1.4736036376953126, 1.473574951171875, 1.474008056640625, 1.473680419921875, 1.473565673828125, 3.06031005859375, 1.4730966796875, 1.4736650390625, 1.4730096435546876, 1.473244140625, 1.4731519775390625, 1.4736844482421876, 1.4732667236328125, 1.4738687744140626, 1.4742548828125, 1.4740458984375, 1.473471435546875, 1.473302490234375, 1.4735523681640625, 1.473406982421875, 1.473101806640625, 1.47376953125, 1.4739149169921875, 1.4731151123046875, 1.4732298583984376, 1.4734039306640625, 1.4731314697265625, 1.473111083984375, 1.4737376708984375, 1.4738216552734376, 1.4737899169921875, 1.4738800048828125, 1.4739691162109374, 1.4734581298828124, 1.47338134765625, 1.4734776611328124, 1.47355029296875, 1.473333251953125, 1.4734776611328124, 1.4733946533203126, 1.4732882080078125, 1.473912841796875, 1.473892333984375, 1.474216064453125, 1.4738175048828126, 1.47407666015625, 1.4737275390625, 1.473375244140625, 1.4743492431640626, 1.4744185791015625, 1.4742845458984375, 1.4742108154296876, 1.474523193359375, 1.4741842041015625, 1.47373876953125, 1.4740675048828125, 1.474164794921875, 1.474018310546875, 1.473081298828125, 1.474177001953125, 1.4735738525390625, 1.47327587890625, 1.4737294921875, 1.4737100830078125, 1.4734151611328126, 1.4735892333984375, 1.47363330078125, 1.473797119140625, 3.0603828125, 1.4735103759765624, 1.4731171875, 1.4736036376953126, 1.4737879638671876, 1.4736036376953126, 1.47382470703125, 1.4734530029296875, 1.473491943359375, 1.4732840576171875, 1.4735810546875, 1.4739404296875, 1.4733045654296875, 1.4736578369140625, 1.4735308837890626, 1.4735565185546875, 1.4735206298828125, 1.47350830078125, 1.4735667724609376, 1.473364013671875, 1.4735780029296874, 1.473996826171875, 1.4732401123046874, 1.4733731689453125, 1.4740458984375, 1.473659912109375, 1.4739732666015626, 1.4740029296875, 1.47403369140625, 1.4737161865234376, 1.474050048828125, 1.474302978515625, 1.47325439453125, 1.473807373046875, 1.474555908203125, 1.474566162109375, 1.4731663818359375, 1.473481689453125, 1.4736650390625, 1.473850341796875, 1.4740213623046876, 1.4742425537109376, 1.4736527099609376, 1.4736947021484375, 1.473903564453125, 1.4739005126953124, 1.473833984375, 1.4741944580078126, 1.473732666015625, 1.4733824462890626, 1.473692626953125, 1.474039794921875, 1.4735994873046876, 1.4731766357421876, 1.4735155029296876, 1.473933349609375, 1.4738759765625, 1.473734619140625, 
1.4740357666015624, 1.47370703125, 1.4738463134765625, 1.4742486572265625, 1.4741298828125, 3.061118896484375, 1.473859619140625, 1.4736097412109375, 1.4739149169921875, 1.4740848388671874, 1.474361328125, 1.4743634033203126, 1.4743951416015626, 1.473876953125, 1.4742210693359374, 1.4737510986328124, 1.473217529296875, 1.4732984619140626, 1.473310791015625, 1.473673095703125, 1.4737142333984374, 1.4734090576171874, 1.4737091064453125, 1.474240478515625, 1.4736937255859375, 1.4738544921875, 1.4743326416015625, 1.4735289306640624, 1.4736065673828125, 1.4740234375, 1.4740274658203125, 1.4741329345703125, 1.473870849609375, 1.4740899658203126, 1.473397705078125, 1.473323974609375, 1.473269775390625, 1.4733096923828124, 1.4737264404296875, 1.473578125, 1.47388916015625, 1.473797119140625, 1.474377685546875, 1.47427734375, 1.4738125, 1.473481689453125, 1.473944580078125, 1.473754150390625, 1.4732686767578125, 1.473934326171875, 1.4739149169921875, 1.4737080078125, 1.47405615234375, 1.4740255126953126, 1.4739588623046875, 1.4738177490234374, 1.4743765869140626, 1.474249755859375, 1.473743896484375, 1.4736650390625, 1.4745087890625, 1.473791015625, 1.473939453125, 1.4738421630859375, 1.4741165771484375, 1.4739957275390625, 1.4737919921875, 1.4739312744140625, 3.059681396484375, 1.4742149658203125, 1.4734141845703126, 1.4730894775390626, 1.4733916015625, 1.473896484375, 1.473638427734375, 1.4734898681640625, 1.4739404296875, 1.47435107421875, 1.47458349609375, 1.474293701171875, 1.4747064208984375, 1.4745426025390624, 1.47485693359375, 1.4739322509765624, 1.4736671142578126, 1.473491943359375, 1.4745078125, 1.474566162109375, 1.47476171875, 1.47451806640625, 1.4742261962890626, 1.4741903076171874, 1.47418115234375, 1.4742999267578125, 1.4743060302734374, 1.4741258544921876, 1.473733642578125, 1.4733138427734376, 1.472933837890625, 1.4730772705078126, 1.47359130859375, 1.473479736328125, 1.4740745849609376, 1.4737479248046874, 1.4733055419921874, 1.4737100830078125, 1.474017333984375, 1.4741851806640625, 1.47333935546875, 1.474234375, 1.4739200439453124, 1.473474609375, 1.47338037109375, 1.4740694580078124, 1.473732666015625, 1.4734735107421875, 1.4738309326171875, 1.4739005126953124, 1.473574951171875, 1.473660888671875, 1.47378076171875, 1.473206298828125, 1.4732236328125, 1.4738565673828126, 1.4739322509765624, 1.4736004638671876, 1.473474609375, 1.4738258056640625, 1.4735831298828126, 1.4737120361328124, 1.4735411376953125, 3.0642236328125, 1.4730126953125, 1.47343359375, 1.4729718017578124, 1.4735728759765625, 1.4736158447265626, 1.4739005126953124, 1.47361279296875, 1.473840087890625, 1.4736087646484375, 1.4734949951171874, 1.47426611328125, 1.47422314453125, 1.4739998779296875, 1.4739046630859376, 1.474639892578125, 1.474404296875, 1.474365478515625, 1.4745169677734375, 1.474093017578125, 1.4741513671875, 1.4736055908203125, 1.47390869140625, 1.47327490234375, 1.473606689453125, 1.473691650390625, 1.47382373046875, 1.473692626953125, 1.4730546875, 1.473733642578125, 1.4736373291015625, 1.47371923828125, 1.473250244140625, 1.473560546875, 1.4734674072265626, 1.473487060546875, 1.4741666259765625, 1.47329638671875, 1.473322021484375, 1.473585205078125, 1.4732359619140625, 1.473195068359375, 1.4736046142578125, 1.4733804931640626, 1.473373046875, 1.4742098388671876, 1.4742425537109376, 1.4739527587890624, 1.4736629638671874, 1.473869873046875, 1.47333935546875, 1.4739404296875, 1.4740797119140625, 1.4739158935546874, 1.47359130859375, 1.4735042724609375, 1.4738052978515626, 1.473397705078125, 
1.4735718994140625, 1.473728515625, 1.4734254150390624, 1.473385498046875, 1.4733148193359376, 3.0616728515625, 1.4734571533203125, 1.4732861328125, 1.4731346435546875, 1.473143798828125, 1.4733568115234374, 1.4734940185546874, 1.4731695556640625, 1.4739505615234374, 1.473987548828125, 1.4735789794921874, 1.4733271484375, 1.4738790283203125, 1.4730577392578126, 1.473259521484375, 1.474188232421875, 1.474802734375, 1.4733424072265624, 1.473292236328125, 1.474060302734375, 1.4732933349609374, 1.4734765625, 1.47428662109375, 1.4740941162109376, 1.4739312744140625, 1.4739609375, 1.4747392578125, 1.4735155029296876, 1.47369775390625, 1.4734786376953124, 1.473407958984375, 1.473471435546875, 1.4732052001953124, 1.473833984375, 1.4732821044921875, 1.4737459716796875, 1.4735677490234376, 1.473176513671875, 1.4732420654296876, 1.473311767578125, 1.4738421630859375, 1.4735667724609376, 1.4740521240234374, 1.4739364013671874, 1.4734776611328124, 1.4733291015625, 1.4738514404296874, 1.473481689453125, 1.4737315673828124, 1.473935302734375, 1.4735574951171875, 1.473122314453125, 1.47314892578125, 1.473607666015625, 1.4732789306640626, 1.4734888916015625, 1.47401416015625, 1.47483544921875, 1.4746541748046875, 1.47443603515625, 1.4748702392578126, 1.4743603515625, 1.4745528564453125, 3.064785888671875, 1.4746173095703126, 1.47447607421875, 1.4746685791015626, 1.47447607421875, 1.4743643798828125, 1.4738125, 1.4731048583984374, 1.473474609375, 1.47338134765625, 1.473534912109375, 1.473302490234375, 1.473479736328125, 1.4740050048828126, 1.473471435546875, 1.47342333984375, 1.473850341796875, 1.4732401123046874, 1.4734808349609374, 1.4733658447265625, 1.4733629150390626, 1.473560546875, 1.473197021484375, 1.47348681640625, 1.473091552734375, 1.4735728759765625, 1.47310693359375, 1.473244140625, 1.4730096435546876, 1.4732452392578126, 1.473584228515625, 1.473293212890625, 1.4733302001953126, 1.4733189697265625, 1.473532958984375, 1.473896484375, 1.47376953125, 1.474218994140625, 1.473491943359375, 1.4740469970703125, 1.4737294921875, 1.4742374267578124, 1.4740889892578124, 1.473924072265625, 1.473764404296875, 1.473859619140625, 1.4736947021484375, 1.4739609375, 1.47409716796875, 1.47407763671875, 1.4737705078125, 1.4731356201171875, 1.4733404541015624, 1.473121337890625, 1.4731192626953125, 1.4733465576171876, 1.473375244140625, 1.4740101318359375, 1.4735616455078124, 1.47340087890625, 1.4733568115234374, 1.473385498046875, 1.4738780517578125, 3.063844970703125, 1.4731295166015625, 1.4731304931640625, 1.4736036376953126, 1.473776611328125, 1.47394970703125, 1.4742486572265625, 1.4738533935546876, 1.4738216552734376, 1.4736895751953125, 1.4738575439453125, 1.473681396484375, 1.473460205078125, 1.47378076171875, 1.4738780517578125, 1.4737530517578126, 1.473596435546875, 1.4732513427734375, 1.4733271484375, 1.47356982421875, 1.4737847900390626, 1.47350732421875, 1.473412109375, 1.47346533203125, 1.4734541015625, 1.473197021484375, 1.4740101318359375, 1.47386669921875, 1.4737366943359376, 1.4738145751953124, 1.4737264404296875, 1.47363330078125, 1.4732882080078125, 1.473638427734375, 1.4737899169921875, 1.473912841796875, 1.473796142578125, 1.4733824462890626, 1.473281005859375, 1.473376220703125, 1.473712158203125, 1.47343359375, 1.473532958984375, 1.4734254150390624, 1.4737110595703125, 1.473586181640625, 1.4736097412109375, 1.4738094482421875, 1.47342236328125, 1.473691650390625, 1.4737294921875, 1.4736414794921875, 1.4735841064453126, 1.4735462646484374, 1.4737427978515625, 1.473418212890625, 
1.473576904296875, 1.473511474609375, 1.47342333984375, 1.4733507080078125, 1.47335986328125, 1.474207763671875, 1.47342138671875]",tokens/s,0.6682591926299519,,,,, 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,.,.,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 370, in cached_file raise EnvironmentError( OSError: . does not appear to have a file named config.json. Checkout 'https://huggingface.co/./tree/None' for available files. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-j-6b,EleutherAI/gpt-j-6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTJForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-7.5B,facebook/xglm-7.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: XGLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-7b,tiiuae/falcon-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: FalconForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/polyglot-ko-12.8b,EleutherAI/polyglot-ko-12.8b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciCoder-1b,Deci/DeciCoder-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,m,m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/m/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a2b96-35bb2b3b3788652a33da6f3e;25abb72a-e904-4325-b74c-8f026ac80b25) Repository Not Found for url: https://huggingface.co/m/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: m is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-1.8B,Qwen/Qwen1.5-1.8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", 
line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-MoE-A2.7B,Qwen/Qwen1.5-MoE-A2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained 
self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,v,v,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/v/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir 
_raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a33b9-0e7e4e8447c494ce1657603f;1a9df9a3-6965-4e83-a780-78d465f183cf) Repository Not Found for url: https://huggingface.co/v/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: v is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-110B,Qwen/Qwen1.5-110B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-2-12b,stabilityai/stablelm-2-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.3b,EleutherAI/pythia-1.3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-7b-hf,meta-llama/Llama-2-7b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciLM-7B,Deci/DeciLM-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 559, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3704, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1490, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1656, in _check_and_enable_sdpa raise ValueError( ValueError: DeciLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-0.5B,Qwen/Qwen1.5-0.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,0,0,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/0/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a3410-4bcd30a86db7a2bc309ac125;e5e79294-92a8-4a79-ae16-fa86fb5182a3) Repository Not Found for url: https://huggingface.co/0/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 0 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-70b-hf,meta-llama/Llama-2-70b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,2,2,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/2/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a32b5-50ffd389335804036fa0ea83;21d2b98e-438c-4908-8b96-4cdd8c5bc4fa) Repository Not Found for url: https://huggingface.co/2/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 2 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-2.7b,facebook/opt-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", 
line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,l,l,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/l/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3105-7736b82a1cb059bd07ec6531;caee63ba-efbd-4ec4-9060-f173d97b6e18) Repository Not Found for url: https://huggingface.co/l/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: l is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Meta-Llama-3-70B,meta-llama/Meta-Llama-3-70B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights 
self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-32B,Qwen/Qwen1.5-32B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-72B,Qwen/Qwen2-beta-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File 
""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,s,s,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/s/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 
1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2c3b-42644fde5d5e9c5961d9ce44;baa707c9-b913-49c0-9f23-dc8f3fa101d3) Repository Not Found for url: https://huggingface.co/s/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: s is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,B,B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/B/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a330b-72d9ba9b344a91df2c71654e;70589415-37d6-43dc-b045-6144c75c7797) Repository Not Found for url: https://huggingface.co/B/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: B is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-14B,Qwen/Qwen2-beta-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,mistralai/Mistral-7B-v0.1,mistralai/Mistral-7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File 
""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-16B-nl,Salesforce/codegen-16B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained 
config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: CodeGenForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-2.7b,EleutherAI/pythia-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. 
Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,a,a,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/a/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a30ad-6ff95bd903cf86fb5f768c1a;14ec4366-f8ab-4e7b-9f3e-c0bdda1bd16c) Repository Not Found for url: https://huggingface.co/a/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: a is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-2.7B,EleutherAI/gpt-neo-2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = 
launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-40b,tiiuae/falcon-40b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: FalconForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,1,1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/1/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, 
response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a34bd-36274c4d46bd43281c39940d;3ac6d2ac-6899-46ab-bebc-8291ebe14d90) Repository Not Found for url: https://huggingface.co/1/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 1 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-base-alpha-3b,stabilityai/stablelm-base-alpha-3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File 
""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,t,t,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/t/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File 
""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2fe9-7a3d23f0792c80111efc91ca;64dc9c38-5855-4f53-bc46-b805dc9eaa32) Repository Not Found for url: https://huggingface.co/t/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: t is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neox-20b,EleutherAI/gpt-neox-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights 
self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,/,/,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 370, in cached_file raise EnvironmentError( OSError: / does not appear to have a file named config.json. Checkout 'https://huggingface.co///tree/None' for available files. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-2b,google/recurrentgemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc3ad-23ef7a1a18212d7c32ce0232;72408b5e-3011-450e-8e87-97c2303b87d3) 403 Forbidden: Please enable access to public gated 
repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/recurrentgemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-72B,Qwen/Qwen1.5-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,-,-,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 106, in _inner_fn validate_repo_id(arg_value) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 160, in validate_repo_id raise HFValidationError( huggingface_hub.errors.HFValidationError: Repo id must use alphanumeric chars or '-', '_', '.', '--' and '..' are forbidden, '-' and '.' cannot start or end the name, max length is 96: '-'. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 463, in cached_file raise EnvironmentError( OSError: Incorrect path_or_model_id: '-'. Please provide either the path to a local folder or the repo_id of a model on the Hub. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-6B-nl,Salesforce/codegen-6B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: CodeGenForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-7b,google/recurrentgemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 
352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667cc402-7d34a18f1469d08617bf3cbc;ea61f9d9-ee2c-427b-b6b9-9dc2b65274f5) Repository Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: google/recurrentgemma-7b is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-13b,huggyllama/llama-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: 
Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-2b,google/gemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: 
https://huggingface.co/google/gemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc304-29f74f2b4bc24e891ee12c02;83f77c70-9384-477d-8d45-6e454e709f57) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/gemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-7B,Qwen/Qwen-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-13b-hf,meta-llama/Llama-2-13b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-4B,Qwen/Qwen1.5-4B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-410m,EleutherAI/pythia-410m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-7b,huggyllama/llama-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-125m,EleutherAI/gpt-neo-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,microsoft/phi-1_5,microsoft/phi-1_5,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-14B,Qwen/Qwen-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-65b,huggyllama/llama-65b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 86.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-7b,google/gemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc358-3ccf7705225c15f6081f8d6f;e8fea6ae-a799-4b10-b608-837eed658478) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/gemma-7b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-7b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,i,i,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/i/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a2f3b-3bca5e9666fecbf74956d5c5;d3f2fa35-fa14-4705-a900-030611e210f1) Repository Not Found for url: https://huggingface.co/i/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: i is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-6.7b,facebook/opt-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", 
line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-34B,01-ai/Yi-34B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 564, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3907, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 462, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-160m,EleutherAI/pythia-160m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = 
cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,M,M,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/M/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) 
File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2ee5-2cac1a5e661fbd1052dc5de9;96c2d0d7-ac30-4202-8d6d-e577f2a3dcd1) Repository Not Found for url: https://huggingface.co/M/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: M is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,openai-community/gpt2-large,openai-community/gpt2-large,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA 
A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPT2LMHeadModel does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-70m,EleutherAI/pythia-70m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm-20b,internlm/internlm-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3626, in from_pretrained model = cls(config, *model_args, **model_kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 906, in __init__ self.model = InternLMModel(config) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 729, in __init__ self.layers = nn.ModuleList([InternLMDecoderLayer(config) for _ in range(config.num_hidden_layers)]) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 729, in <listcomp> self.layers = nn.ModuleList([InternLMDecoderLayer(config) for _ in range(config.num_hidden_layers)]) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 545, in __init__ self.self_attn = INTERNLM_ATTENTION_CLASSES[config.attn_implementation](config=config) KeyError: 'sdpa' ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Meta-Llama-3-8B,meta-llama/Meta-Llama-3-8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-350m,facebook/opt-350m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-rw-1b,tiiuae/falcon-rw-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: FalconForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-125m,facebook/opt-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-4.5B,facebook/xglm-4.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: XGLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,x,x,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/x/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, 
response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a320b-575297d77515287f6396cb8d;154e9964-aabf-4920-91be-a79165c68943) Repository Not Found for url: https://huggingface.co/x/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: x is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=<your_token>` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-3b-4e1t,stabilityai/stablelm-3b-4e1t,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File 
""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-2-1_6b,stabilityai/stablelm-2-1_6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", 
line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-66b,facebook/opt-66b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File 
""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-180B,tiiuae/falcon-180B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/tiiuae/falcon-180B/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-66779496-3bc86fd85515929a60fb51b7;d832641e-7e32-4eb6-be1c-43ea0c4d5aa1) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/tiiuae/falcon-180B/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like tiiuae/falcon-180B is not the path to a directory containing a file named config.json. 
Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-1.3B,EleutherAI/gpt-neo-1.3B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-6.7b,EleutherAI/pythia-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-7B,Qwen/Qwen1.5-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.4b,EleutherAI/pythia-1.4b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-13b,facebook/opt-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,8,8,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/8/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, 
response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a31b3-1b1230b43cb1e5a24cf49abf;79c60a87-9f55-42aa-93e8-104e296ae50e) Repository Not Found for url: https://huggingface.co/8/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 8 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-72B,Qwen/Qwen-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target 
report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,databricks/dbrx-base,databricks/dbrx-base,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return 
fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cbe6a-1073f7667d8c35b32460cb58;b39317eb-56ce-4406-a17c-989762d10da6) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like databricks/dbrx-base is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,microsoft/rho-math-1b-v0.1,microsoft/rho-math-1b-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,openai-community/gpt2,openai-community/gpt2,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPT2LMHeadModel does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,togethercomputer/RedPajama-INCITE-Base-7B-v0.1,togethercomputer/RedPajama-INCITE-Base-7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-30b,huggyllama/llama-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,togethercomputer/RedPajama-INCITE-Base-3B-v1,togethercomputer/RedPajama-INCITE-Base-3B-v1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,mistralai/Mixtral-8x7B-v0.1,mistralai/Mixtral-8x7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,r,r,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/r/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a3048-7901c05e1028e2f505bf76ad;eeeac868-e1d3-4933-a77c-7ede106dbbd9) Repository Not Found for url: https://huggingface.co/r/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: r is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-12b,EleutherAI/pythia-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-30b,facebook/opt-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained 
self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-564M,facebook/xglm-564M,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation 
config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: XGLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,TencentARC/Mistral_Pro_8B_v0.1,TencentARC/Mistral_Pro_8B_v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File 
""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-base-alpha-7b,stabilityai/stablelm-base-alpha-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-6B,01-ai/Yi-6B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 564, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3907, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 462, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-14B,Qwen/Qwen1.5-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-sdpa,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm2-20b,internlm/internlm2-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,.,.,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 370, in cached_file raise EnvironmentError( OSError: . does not appear to have a file named config.json. Checkout 'https://huggingface.co/./tree/None' for available files. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-j-6b,EleutherAI/gpt-j-6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-7.5B,facebook/xglm-7.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: XGLMForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpi6gzcfyw/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-7b,tiiuae/falcon-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: FalconForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmp3cnn9zh1/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/polyglot-ko-12.8b,EleutherAI/polyglot-ko-12.8b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciCoder-1b,Deci/DeciCoder-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,m,m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/m/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a2bb1-56d4a36e5a10843513c195d8;3b333d8b-5bb0-4445-a4a0-ddd243ed83f5) Repository Not Found for url: https://huggingface.co/m/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: m is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-1.8B,Qwen/Qwen1.5-1.8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-MoE-A2.7B,Qwen/Qwen1.5-MoE-A2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File 
""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,v,v,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/v/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a33d5-1887305e5fabc43429fa4d7e;8b1320a7-dad0-4f3a-ae50-4a1de209a034) Repository Not Found for url: https://huggingface.co/v/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: v is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-110B,Qwen/Qwen1.5-110B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-2-12b,stabilityai/stablelm-2-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.3b,EleutherAI/pythia-1.3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-7b-hf,meta-llama/Llama-2-7b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciLM-7B,Deci/DeciLM-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 559, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3907, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 462, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-0.5B,Qwen/Qwen1.5-0.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,0,0,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/0/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a342d-79df3ce57fc21896457950c4;1b2bb201-2b50-4dca-be16-9bbe51b018b3) Repository Not Found for url: https://huggingface.co/0/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 0 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-70b-hf,meta-llama/Llama-2-70b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,2,2,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/2/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 
404 Client Error. (Request ID: Root=1-667a32d1-321ee71f62b46c65733442e6;3dddeea2-9cd1-48d2-bfba-7b46ddab7341) Repository Not Found for url: https://huggingface.co/2/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 2 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-2.7b,facebook/opt-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,l,l,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/l/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File 
""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3121-4fd0b7954e1433a258cb57ba;5417471b-a428-4691-a8a1-aa3a3288098d) Repository Not Found for url: https://huggingface.co/l/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: l is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Meta-Llama-3-70B,meta-llama/Meta-Llama-3-70B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in 
load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-32B,Qwen/Qwen1.5-32B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = 
self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-72B,Qwen/Qwen2-beta-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File 
""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,s,s,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/s/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 
1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2c56-33be37f341a20ffb122858ce;0341494b-0435-4e2f-8ac1-bd001bf92139) Repository Not Found for url: https://huggingface.co/s/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: s is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,B,B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/B/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a3327-40dd8955330c60f1141e5708;0e37daa3-6407-46c9-ae9d-54f485d7a18d) Repository Not Found for url: https://huggingface.co/B/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: B is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-14B,Qwen/Qwen2-beta-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,mistralai/Mistral-7B-v0.1,mistralai/Mistral-7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File 
""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-16B-nl,Salesforce/codegen-16B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 
3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: CodeGenForCausalLM does not support Flash Attention 2.0 yet. Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmp6u8f889d/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-2.7b,EleutherAI/pythia-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File 
""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,a,a,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/a/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a30cb-134e4b903888a9ff6e09e2ac;eb0fdb99-e3f9-41ba-ac95-abb44919b6b3) Repository Not Found for url: https://huggingface.co/a/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: a is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-2.7B,EleutherAI/gpt-neo-2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = 
launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-40b,tiiuae/falcon-40b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", 
line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: FalconForCausalLM does not support Flash Attention 2.0 yet. Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmp4140ghqs/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,1,1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/1/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 
1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a34da-6df678121961aa9c3298fe24;f64ad6a8-6202-4dda-95db-12c5643b1cfb) Repository Not Found for url: https://huggingface.co/1/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 1 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing 
`token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-base-alpha-3b,stabilityai/stablelm-base-alpha-3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,t,t,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/t/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a3008-72ae75a35a2932391c7afc32;01be9dff-6af4-4d3c-ae06-d41ab1edf70f) Repository Not Found for url: https://huggingface.co/t/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: t is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neox-20b,EleutherAI/gpt-neox-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,/,/,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File 
""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 370, in cached_file raise EnvironmentError( OSError: / does not appear to have a file named config.json. Checkout 'https://huggingface.co///tree/None' for available files. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-2b,google/recurrentgemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", 
line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc3ca-7400c4436981167a4f80780d;bae21caa-ec82-485a-a288-49a5b7a8e22f) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/recurrentgemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-72B,Qwen/Qwen1.5-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,-,-,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 106, in _inner_fn validate_repo_id(arg_value) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 160, in validate_repo_id raise HFValidationError( huggingface_hub.errors.HFValidationError: Repo id must use alphanumeric chars or '-', '_', '.', '--' and '..' are forbidden, '-' and '.' cannot start or end the name, max length is 96: '-'. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 463, in cached_file raise EnvironmentError( OSError: Incorrect path_or_model_id: '-'. Please provide either the path to a local folder or the repo_id of a model on the Hub. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-6B-nl,Salesforce/codegen-6B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: CodeGenForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmppbwhq2zy/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-7b,google/recurrentgemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667cc420-4098f41e41f1aeca4721a516;898962b0-7d85-48b9-a021-984c2daf9972) Repository Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: google/recurrentgemma-7b is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-13b,huggyllama/llama-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-2b,google/gemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in 
hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc31f-60b5dc0c21149210189286dd;a1d7abe2-7429-472a-8e07-c596023fa542) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/gemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-7B,Qwen/Qwen-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-13b-hf,meta-llama/Llama-2-13b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-4B,Qwen/Qwen1.5-4B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-410m,EleutherAI/pythia-410m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-7b,huggyllama/llama-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-125m,EleutherAI/gpt-neo-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,microsoft/phi-1_5,microsoft/phi-1_5,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-14B,Qwen/Qwen-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-65b,huggyllama/llama-65b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 86.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-7b,google/gemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc375-1d72a9451c1dc96e5ff28839;ff3e3ebd-0eea-4c1b-bc58-b3b0fb1b0fba) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/gemma-7b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-7b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,i,i,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/i/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 
Client Error. (Request ID: Root=1-667a2f5a-564035c07256784251b1ce3f;c9ae6d8c-0866-4249-ab8b-6e94077e323f) Repository Not Found for url: https://huggingface.co/i/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: i is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-6.7b,facebook/opt-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-34B,01-ai/Yi-34B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File 
""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 564, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3907, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 462, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-160m,EleutherAI/pythia-160m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in 
from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,M,M,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/M/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in 
_get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2f02-32833e1812d3c0f274ef110e;fa5bb58c-315d-4879-80da-b5f8e295bbe3) Repository Not Found for url: https://huggingface.co/M/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: M is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,openai-community/gpt2-large,openai-community/gpt2-large,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.1,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1241.440256,2645.03296,0.0,1998.585856,1692.285952,s,10,0.1919048309326172,0.01919048309326172,0.0005813408225469491,0.019010607719421386,0.019647625350952148,0.020231876564025877,0.020699277534484865,"[0.02081612777709961, 0.019026687622070312, 0.018811967849731444, 0.01893507194519043, 0.019049951553344727, 0.018704191207885742, 0.01899452781677246, 0.019173408508300783, 0.018875104904174805, 0.019517791748046874]",tokens/s,13339.945573850004,kWh,2.2137678517830954e-07,1.2130415742105693e-07,6.751076978052327e-07,1.0177886404045992e-06,tokens/kWh,251525699.77420157,MB,1241.735168,2645.03296,0.0,1998.585856,1740.085248,s,10,11.541304443359374,1.1541304443359375,0.013489751503469302,1.149205810546875,1.1732497924804688,1.1755724670410155,1.177430606689453,"[1.1778951416015624, 1.1547576904296875, 1.1516905517578124, 1.172733642578125, 1.1467210693359375, 1.1696463623046875, 1.1408421630859376, 1.139661865234375, 1.142564697265625, 1.144791259765625]",tokens/s,54.586550687733464,kWh,1.3797132453780056e-05,7.558356126426566e-06,2.913202725899442e-05,5.048751583920103e-05,tokens/kWh,1247833.230706979,,s,629,11.692877828598027,0.01858963088807317,0.002323445722503397,0.018134016036987305,0.01885880355834961,0.019136306762695315,0.03729248260498047,"[0.019385343551635743, 0.0192225284576416, 0.018934783935546876, 0.01901055908203125, 0.01903001594543457, 0.018994176864624023, 0.019083263397216797, 0.019182592391967773, 0.01884671974182129, 0.018938880920410156, 0.019106815338134766, 0.019335168838500977, 0.01993011283874512, 0.01969254493713379, 0.019323904037475585, 0.018969663619995115, 0.018957248687744142, 0.01906790351867676, 0.01939455986022949, 0.01918976020812988, 0.018973695755004884, 0.018967552185058592, 0.018592767715454102, 0.01882931137084961, 0.01986457633972168, 0.019759103775024413, 0.01927577590942383, 0.018947071075439453, 0.018954240798950195, 0.018718751907348632, 0.01868899154663086, 0.018881536483764647, 0.018890752792358398, 0.01879449653625488, 0.018750463485717773, 0.018704383850097657, 0.018181119918823242, 0.018052095413208007, 0.01804902458190918, 0.018082815170288084, 0.01805414390563965, 0.018017280578613282, 0.018066432952880858, 0.01804287910461426, 0.01808076858520508, 0.01808793640136719, 0.018129919052124025, 0.01807155227661133, 0.017999872207641602, 0.018085887908935547, 0.01802444839477539, 0.018131967544555663, 0.018059263229370116, 0.018091007232666014, 0.018020351409912108, 0.018124799728393554, 0.017999872207641602, 0.01805516815185547, 0.018081792831420897, 0.01821183967590332, 0.018096128463745118, 0.018970624923706055, 0.039049217224121094, 0.01886207962036133, 0.018746368408203123, 0.018647039413452148, 0.01864089584350586, 0.018724863052368163, 0.01864396858215332, 0.018092031478881835, 0.01817190361022949, 0.018177024841308592, 
0.018156543731689453, 0.018041856765747072, 0.018233343124389647, 0.018233343124389647, 0.018225151062011717, 0.01824358367919922, 0.018241535186767577, 0.018158592224121094, 0.018133056640625, 0.01810220718383789, 0.01814630317687988, 0.018093055725097656, 0.01806540870666504, 0.018025472640991212, 0.01816166305541992, 0.018155519485473632, 0.018131967544555663, 0.01815449523925781, 0.01820159912109375, 0.018340864181518556, 0.018150400161743165, 0.018102272033691406, 0.018126848220825196, 0.018134016036987305, 0.01820364761352539, 0.018164735794067383, 0.01820876884460449, 0.018197504043579102, 0.01820159912109375, 0.01819443130493164, 0.01808896064758301, 0.018144256591796876, 0.018129919052124025, 0.018091007232666014, 0.018099199295043944, 0.018150400161743165, 0.018698272705078126, 0.018225120544433593, 0.018116607666015624, 0.018067455291748045, 0.01820057678222656, 0.018164735794067383, 0.018551807403564453, 0.018906112670898437, 0.018751487731933594, 0.01883033561706543, 0.018760704040527345, 0.018741247177124023, 0.018735103607177735, 0.0186746883392334, 0.018655231475830078, 0.018259967803955078, 0.018106367111206053, 0.037272575378417966, 0.01804287910461426, 0.018233343124389647, 0.01804697608947754, 0.018033664703369142, 0.018062335968017578, 0.018127872467041017, 0.018066432952880858, 0.01813711929321289, 0.018199520111083986, 0.01845452880859375, 0.018223104476928712, 0.01807360076904297, 0.018113536834716795, 0.018190336227416993, 0.01820979118347168, 0.018121728897094725, 0.018137088775634767, 0.018068479537963866, 0.018098175048828127, 0.018137088775634767, 0.018147327423095702, 0.018077695846557617, 0.018140159606933593, 0.018127872467041017, 0.018028543472290038, 0.018081792831420897, 0.01804697608947754, 0.018106367111206053, 0.018052095413208007, 0.018062335968017578, 0.01801523208618164, 0.018132991790771484, 0.018094079971313477, 0.018069503784179687, 0.018031648635864258, 0.018113504409790038, 0.018044927597045898, 0.018437120437622072, 0.01925632095336914, 0.01884774398803711, 0.018707456588745116, 0.018890752792358398, 0.01904640007019043, 0.018824192047119142, 0.018703359603881836, 0.018787328720092773, 0.018679807662963867, 0.018689023971557618, 0.018731008529663085, 0.018735103607177735, 0.018665504455566407, 0.018631647109985352, 0.01864396858215332, 0.018618368148803712, 0.018663423538208008, 0.018375680923461913, 0.01799884796142578, 0.017819648742675782, 0.01783193588256836, 0.01783296012878418, 0.017921024322509766, 0.01779916763305664, 0.037171199798583986, 0.01801215934753418, 0.01789952087402344, 0.017904640197753906, 0.018226175308227538, 0.01882828712463379, 0.018754560470581053, 0.018746368408203123, 0.018753536224365236, 0.01880985641479492, 0.0188723201751709, 0.01885798454284668, 0.018932735443115235, 0.018494464874267577, 0.018233343124389647, 0.018856960296630858, 0.018811904907226562, 0.01864908790588379, 0.018693119049072265, 0.018749439239501953, 0.018752511978149415, 0.01867263984680176, 0.01923686408996582, 0.019318784713745117, 0.01998028755187988, 0.018965503692626954, 0.018785280227661134, 0.018815999984741212, 0.018700288772583007, 0.01884262466430664, 0.018718719482421875, 0.018868223190307617, 0.018815999984741212, 0.019164159774780275, 0.01888768005371094, 0.018765823364257812, 0.018686975479125977, 0.018481151580810547, 0.018116607666015624, 0.018117631912231445, 0.018100223541259765, 0.018150400161743165, 0.01819545555114746, 0.018114591598510744, 0.018619359970092772, 0.018117631912231445, 0.018189311981201172, 
0.01824051284790039, 0.018177024841308592, 0.018135040283203126, 0.01820262336730957, 0.018158624649047852, 0.01811452865600586, 0.018732032775878905, 0.01821696090698242, 0.018974720001220705, 0.01884979248046875, 0.018716672897338867, 0.018817024230957033, 0.01866444778442383, 0.018647039413452148, 0.019155967712402345, 0.019417087554931642, 0.03750092697143555, 0.018118656158447266, 0.01828659248352051, 0.018125823974609375, 0.01805414390563965, 0.018811904907226562, 0.018735103607177735, 0.0184770565032959, 0.018504703521728515, 0.018662399291992187, 0.018761728286743166, 0.018754560470581053, 0.018148351669311523, 0.018076671600341796, 0.018160640716552736, 0.018207744598388673, 0.018172927856445312, 0.018312192916870116, 0.018480127334594726, 0.018217983245849608, 0.018116607666015624, 0.01823641586303711, 0.018152448654174806, 0.018135040283203126, 0.01826304054260254, 0.01798963165283203, 0.01804595184326172, 0.018339839935302735, 0.018109439849853515, 0.018084863662719726, 0.01803264045715332, 0.01802956771850586, 0.018092031478881835, 0.01806438446044922, 0.01800396728515625, 0.018047008514404297, 0.018133983612060547, 0.018128896713256838, 0.018109439849853515, 0.018044927597045898, 0.018116607666015624, 0.018112512588500978, 0.018077695846557617, 0.018052095413208007, 0.018108415603637695, 0.018172927856445312, 0.018084863662719726, 0.018033664703369142, 0.01804595184326172, 0.018159616470336915, 0.018166784286499024, 0.01809715270996094, 0.018106367111206053, 0.018134016036987305, 0.018110464096069336, 0.01809715270996094, 0.018217983245849608, 0.01862451171875, 0.018117631912231445, 0.018137088775634767, 0.018086912155151368, 0.018139135360717772, 0.018082815170288084, 0.037372928619384765, 0.019594240188598632, 0.01887846374511719, 0.018699264526367186, 0.018685951232910156, 0.018745344161987306, 0.0186562557220459, 0.018723840713500976, 0.018711551666259766, 0.018807807922363282, 0.018708480834960937, 0.018749439239501953, 0.018676736831665038, 0.018697216033935548, 0.018767871856689454, 0.018767871856689454, 0.018817024230957033, 0.018683904647827147, 0.018645023345947264, 0.01865622329711914, 0.01864089584350586, 0.018694143295288086, 0.018293760299682618, 0.018084863662719726, 0.018115583419799804, 0.018192384719848635, 0.01815449523925781, 0.018799615859985352, 0.01866035270690918, 0.018714624404907225, 0.01863884735107422, 0.018683904647827147, 0.018699264526367186, 0.0186746883392334, 0.018709503173828124, 0.01882521629333496, 0.018779136657714843, 0.018700288772583007, 0.01929113578796387, 0.01887027168273926, 0.018775039672851563, 0.018670591354370117, 0.018784255981445314, 0.01879859161376953, 0.01863987159729004, 0.018677759170532226, 0.01880985641479492, 0.01878937530517578, 0.018669567108154296, 0.01821696090698242, 0.018050048828125, 0.018114559173583983, 0.018089984893798827, 0.01817190361022949, 0.018095104217529297, 0.01802444839477539, 0.01803059196472168, 0.018067455291748045, 0.018166784286499024, 0.018106367111206053, 0.018116607666015624, 0.018066432952880858, 0.01820979118347168, 0.03734732818603516, 0.01810534477233887, 0.01814630317687988, 0.01808896064758301, 0.018041856765747072, 0.01805619239807129, 0.018103296279907227, 0.018070528030395508, 0.017984512329101563, 0.018099199295043944, 0.018062335968017578, 0.018096128463745118, 0.01801215934753418, 0.018069503784179687, 0.01802444839477539, 0.018127872467041017, 0.018061311721801757, 0.017991680145263672, 0.01801625633239746, 0.01803468894958496, 0.018074623107910158, 0.01801113510131836, 
0.018058240890502928, 0.018307071685791015, 0.01817804718017578, 0.018010112762451173, 0.018112512588500978, 0.018100223541259765, 0.018069503784179687, 0.018035711288452147, 0.01807360076904297, 0.01804800033569336, 0.01803264045715332, 0.01805414390563965, 0.018062335968017578, 0.018094079971313477, 0.018051071166992186, 0.01806540870666504, 0.018123775482177733, 0.017978368759155275, 0.018130943298339842, 0.018027519226074217, 0.018078720092773438, 0.01800294494628906, 0.018018304824829103, 0.01806438446044922, 0.018033695220947266, 0.018548704147338866, 0.018318336486816408, 0.018074623107910158, 0.01804287910461426, 0.018137088775634767, 0.018116607666015624, 0.018299903869628906, 0.018448383331298827, 0.018397184371948243, 0.01822105598449707, 0.018160640716552736, 0.018083839416503905, 0.01819340705871582, 0.018143232345581056, 0.018141183853149414, 0.01808896064758301, 0.037615615844726565, 0.01807360076904297, 0.018068479537963866, 0.018083839416503905, 0.018127872467041017, 0.01803775978088379, 0.018241535186767577, 0.01798963165283203, 0.018557952880859374, 0.018586624145507814, 0.018172927856445312, 0.018067455291748045, 0.01803878402709961, 0.018145280838012694, 0.018100223541259765, 0.018101247787475586, 0.018332672119140626, 0.018173952102661133, 0.01820057678222656, 0.017988639831542967, 0.01809404754638672, 0.018164735794067383, 0.018033664703369142, 0.017838079452514647, 0.017934335708618163, 0.018098175048828127, 0.018100223541259765, 0.018076671600341796, 0.018108415603637695, 0.018108415603637695, 0.018044927597045898, 0.01805721664428711, 0.018168832778930662, 0.018124799728393554, 0.017839103698730468, 0.0178657283782959, 0.01810742378234863, 0.018242528915405273, 0.01802649688720703, 0.018190336227416993, 0.018053119659423827, 0.01803468894958496, 0.017968128204345703, 0.017933311462402343, 0.01803878402709961, 0.018069503784179687, 0.0180316162109375, 0.018095104217529297, 0.01797532844543457, 0.018125823974609375, 0.018138080596923827, 0.01808076858520508, 0.01804287910461426, 0.01801215934753418, 0.01802649688720703, 0.01809715270996094, 0.018018304824829103, 0.01804287910461426, 0.017999872207641602, 0.018067455291748045, 0.018019327163696287, 0.01802137565612793, 0.01806540870666504, 0.03730022430419922, 0.018094079971313477, 0.018059263229370116, 0.01804287910461426, 0.018068479537963866, 0.01806540870666504, 0.01783705520629883, 0.017780736923217775, 0.017881088256835938, 0.018093055725097656, 0.018077695846557617, 0.018033664703369142, 0.01805721664428711, 0.018053119659423827, 0.018106367111206053, 0.018098175048828127, 0.01809715270996094, 0.018293760299682618, 0.018572288513183592, 0.018217983245849608, 0.018103296279907227, 0.018078720092773438, 0.01804595184326172, 0.01818009567260742, 0.01806540870666504, 0.018141183853149414, 0.018036735534667968, 0.01805721664428711, 0.01779199981689453, 0.017710079193115236, 0.017810432434082032, 0.017822719573974608, 0.01781760025024414, 0.01787392044067383, 0.01781760025024414, 0.017908735275268553, 0.018152448654174806, 0.018028543472290038, 0.01804902458190918, 0.01803980827331543, 0.018033664703369142, 0.018020351409912108, 0.01802137565612793, 0.018041856765747072, 0.018025472640991212, 0.018141183853149414, 0.018959360122680666, 0.019607551574707033, 0.01887948799133301, 0.01866444778442383, 0.01859174346923828, 0.01863680076599121, 0.018182144165039063, 0.01803980827331543, 0.018092031478881835, 0.01807257652282715, 0.018306047439575195, 0.018158592224121094, 0.018062335968017578, 0.018176000595092775, 
0.01822105598449707, 0.018059263229370116, 0.018125823974609375, 0.03789209747314453, 0.018148351669311523, 0.01803468894958496, 0.018094079971313477, 0.017999872207641602, 0.018111488342285157, 0.01804902458190918, 0.018020351409912108, 0.018093055725097656, 0.01810534477233887, 0.018127872467041017, 0.018068511962890624, 0.0180664005279541, 0.018096128463745118, 0.01821696090698242, 0.018103296279907227, 0.018053119659423827, 0.018055200576782227, 0.018034656524658202, 0.01801113510131836, 0.018131967544555663, 0.01807360076904297, 0.01802649688720703, 0.01801420783996582, 0.01799679946899414, 0.018027519226074217, 0.01835212707519531, 0.018299903869628906, 0.018121728897094725, 0.018103296279907227, 0.01825484848022461, 0.018123775482177733, 0.01818623924255371, 0.018493440628051756, 0.01862451171875, 0.01944371223449707, 0.01863167953491211, 0.018110464096069336, 0.018092031478881835, 0.018112512588500978, 0.018126848220825196, 0.01802649688720703, 0.018069503784179687, 0.0180316162109375, 0.018068479537963866, 0.018126848220825196, 0.018089984893798827, 0.018099199295043944, 0.018084863662719726, 0.01804697608947754, 0.01818623924255371, 0.01801625633239746, 0.01804697608947754, 0.018050048828125, 0.018163711547851562, 0.01810534477233887, 0.017994752883911135, 0.018119680404663087, 0.01820057678222656, 0.01805619239807129, 0.018568191528320312, 0.018714624404907225, 0.018725887298583984]",tokens/s,53.79342957484891,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-70m,EleutherAI/pythia-70m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File 
""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm-20b,internlm/internlm-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", 
line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Meta-Llama-3-8B,meta-llama/Meta-Llama-3-8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init 
submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-350m,facebook/opt-350m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-rw-1b,tiiuae/falcon-rw-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: FalconForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmp5eynn9ti/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-125m,facebook/opt-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-4.5B,facebook/xglm-4.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: XGLMForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmp8jwbl6lu/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,x,x,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/x/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper 
hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3227-77919e8e655d3232102d5250;6137733b-5015-4993-a6da-a4f0b4d28fc0) Repository Not Found for url: https://huggingface.co/x/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: x is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-3b-4e1t,stabilityai/stablelm-3b-4e1t,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File 
""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-2-1_6b,stabilityai/stablelm-2-1_6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = 
backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-66b,facebook/opt-66b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = 
self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 162.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-180B,tiiuae/falcon-180B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/tiiuae/falcon-180B/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", 
line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667794b3-6bbd00527d87553c311cab9a;2bb6b5aa-d724-471f-98f0-7f3868bf2874) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/tiiuae/falcon-180B/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like tiiuae/falcon-180B is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-1.3B,EleutherAI/gpt-neo-1.3B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-6.7b,EleutherAI/pythia-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-7B,Qwen/Qwen1.5-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.4b,EleutherAI/pythia-1.4b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-13b,facebook/opt-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,8,8,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/8/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a31d0-5d6942fa681b63c16c03cdcc;32d2b448-179d-48a1-8649-815bd22a9c52) Repository Not Found for url: https://huggingface.co/8/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 8 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-72B,Qwen/Qwen-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 501, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 326, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,databricks/dbrx-base,databricks/dbrx-base,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, 
**kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cbe86-7c2a3fbe52c934a50730cba0;4a6bf3a9-396e-46f5-b593-91c228e87fe4) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like databricks/dbrx-base is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,microsoft/rho-math-1b-v0.1,microsoft/rho-math-1b-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,openai-community/gpt2,openai-community/gpt2,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1214.447616,1005.060096,0.0,358.612992,318.913024,s,23,0.16858035135269164,0.007329580493595289,0.0002587299119465544,0.007231008052825927,0.007471584033966065,0.007511299371719361,0.00818978693008423,"[0.008380064010620117, 0.007363743782043457, 0.007418399810791016, 0.007246111869812012, 0.007178239822387696, 0.007166944026947022, 0.007157599925994873, 0.007199552059173584, 0.007136896133422852, 0.007476480007171631, 0.007105247974395752, 0.0071660480499267576, 0.007231008052825927, 0.007161407947540283, 0.007184447765350342, 0.007153855800628662, 0.007515168190002442, 0.007452000141143799, 0.007419456005096436, 0.007367104053497314, 0.007440927982330322, 0.007213119983673096, 0.00744652795791626]",tokens/s,34926.964813838546,kWh,8.452715794226453e-08,4.6316905321826644e-08,1.8026361583691486e-07,3.11107679101006e-07,tokens/kWh,822866220.2738029,MB,1214.447616,1005.060096,0.0,358.612992,328.804864,s,23,9.953621459960937,0.4327661504330842,0.010704234458509567,0.429077880859375,0.44030704345703126,0.44415710449218754,0.4681292211914063,"[0.47477725219726563, 0.4445589294433594, 0.42644979858398435, 0.4242642822265625, 0.42919677734375, 0.42827423095703127, 0.42818402099609376, 0.4242230834960937, 0.42831201171875, 0.429077880859375, 0.4333373718261719, 0.4317017822265625, 0.437157470703125, 0.4248154602050781, 0.42390850830078125, 0.43650335693359377, 0.4387818298339844, 0.4320978088378906, 0.43937249755859376, 0.4277911987304687, 0.42581710815429685, 0.4244781188964844, 0.44054067993164064]",tokens/s,145.5751563216155,kWh,5.077228334491569e-06,2.782076962048813e-06,8.839489976768277e-06,1.6698795273308658e-05,tokens/kWh,3772727.2518095453,,s,1448,10.106253310203536,0.0069794567059416794,0.000952161877616497,0.006773759841918945,0.0070439937591552735,0.007432703852653502,0.014228009071350098,"[0.007984127998352051, 0.007874559879302979, 0.007838719844818116, 0.00810905647277832, 0.007803904056549072, 0.0076360321044921875, 0.007531455993652344, 0.007589888095855713, 0.007426047801971435, 0.007560192108154297, 0.0076063361167907715, 0.007623616218566894, 0.0076574721336364745, 0.007613440036773681, 0.0075335679054260255, 0.007480319976806641, 0.0076472959518432615, 0.007611328125, 0.0075428481101989745, 0.0075550079345703125, 0.00753868818283081, 0.007555071830749512, 0.007600128173828125, 0.0074065918922424315, 0.007469056129455566, 0.0074403839111328125, 0.007422976016998291, 0.0075304961204528805, 0.007518208026885987, 0.007624703884124756, 0.007686143875122071, 0.007682047843933106, 0.007307263851165771, 0.007299071788787842, 0.007363584041595459, 0.007319551944732666, 0.007326720237731933, 0.0074065918922424315, 0.0074741759300231934, 0.007550975799560547, 0.007709695816040039, 0.007379968166351319, 0.007514111995697022, 0.007488512039184571, 0.007510015964508057, 0.007669760227203369, 
0.007569407939910889, 0.007693312168121338, 0.0076687359809875484, 0.0076277761459350585, 0.007525375843048096, 0.007512063980102539, 0.007461887836456299, 0.00764415979385376, 0.007400447845458984, 0.0072427520751953125, 0.007208960056304932, 0.007473152160644531, 0.00729702377319336, 0.00703385591506958, 0.007020544052124023, 0.007089183807373047, 0.014714847564697266, 0.007219200134277344, 0.007350272178649903, 0.007436287879943848, 0.007600128173828125, 0.007280640125274658, 0.007155712127685547, 0.007156735897064209, 0.007203839778900147, 0.007258111953735351, 0.00719974422454834, 0.007029759883880615, 0.007017471790313721, 0.007067647933959961, 0.007037951946258545, 0.0069918718338012695, 0.007010335922241211, 0.006995999813079834, 0.006998976230621338, 0.006979584217071533, 0.0070348801612854, 0.007015423774719238, 0.00698367977142334, 0.006958079814910889, 0.006994944095611572, 0.00698367977142334, 0.006978559970855713, 0.006947840213775635, 0.0070348801612854, 0.006961152076721191, 0.007020544052124023, 0.0069683518409729, 0.007003104209899903, 0.007001088142395019, 0.006972415924072266, 0.006957056045532227, 0.007001088142395019, 0.006966271877288818, 0.006965248107910156, 0.007002111911773682, 0.0069816322326660156, 0.007114751815795899, 0.007004159927368164, 0.007090176105499267, 0.007002111911773682, 0.006996992111206054, 0.007081984043121338, 0.007012351989746094, 0.006987775802612305, 0.006988800048828125, 0.0069928960800170895, 0.007177216053009033, 0.007015423774719238, 0.006977536201477051, 0.007003136157989502, 0.007126016139984131, 0.007016448020935059, 0.006986752033233643, 0.006990848064422607, 0.0069632000923156735, 0.0070041918754577635, 0.007000031948089599, 0.007019519805908203, 0.014730239868164062, 0.007007232189178467, 0.0069550080299377445, 0.007005216121673584, 0.0070368962287902835, 0.006977536201477051, 0.006851583957672119, 0.006823935985565186, 0.006765567779541016, 0.006767615795135498, 0.006763519763946534, 0.006762495994567871, 0.0068055038452148435, 0.006909952163696289, 0.006785024166107178, 0.00677785587310791, 0.006772736072540283, 0.0067420158386230465, 0.006738944053649902, 0.006725632190704346, 0.0067010560035705566, 0.006772736072540283, 0.006744063854217529, 0.006779903888702392, 0.006756351947784424, 0.006751232147216797, 0.006719488143920899, 0.006737919807434082, 0.0067758078575134275, 0.006755328178405762, 0.006710271835327148, 0.006716415882110595, 0.006723584175109864, 0.006725632190704346, 0.00672051191329956, 0.0067123198509216305, 0.006694911956787109, 0.006738944053649902, 0.006696959972381592, 0.00672051191329956, 0.006724607944488525, 0.006721536159515381, 0.00674508810043335, 0.006755328178405762, 0.006785024166107178, 0.006756351947784424, 0.006723584175109864, 0.006751232147216797, 0.0067573761940002445, 0.006761472225189209, 0.006666240215301514, 0.006661119937896728, 0.006723584175109864, 0.006661119937896728, 0.006654975891113281, 0.0066375679969787596, 0.006723584175109864, 0.006718463897705078, 0.006703104019165039, 0.006754303932189941, 0.006707200050354004, 0.006717440128326416, 0.006744063854217529, 0.014316543579101563, 0.0067348480224609375, 0.006750207901000976, 0.0067870721817016606, 0.006694911956787109, 0.006708223819732666, 0.0067010560035705566, 0.00672051191329956, 0.0067276802062988285, 0.0066979842185974124, 0.006687744140625, 0.006713344097137451, 0.006761472225189209, 0.006749184131622315, 0.0067645440101623535, 0.006747136116027832, 0.006710271835327148, 0.006718463897705078, 0.006700032234191895, 
0.006749184131622315, 0.006680575847625733, 0.006717440128326416, 0.00667852783203125, 0.006739967823028564, 0.006696959972381592, 0.0067010560035705566, 0.006673408031463623, 0.006703104019165039, 0.006800384044647217, 0.00672051191329956, 0.006776832103729248, 0.006722559928894043, 0.006737919807434082, 0.006756351947784424, 0.0067717118263244626, 0.006748159885406494, 0.006724607944488525, 0.00672870397567749, 0.006800384044647217, 0.006699007987976074, 0.0067276802062988285, 0.006737919807434082, 0.0067041277885437015, 0.006756351947784424, 0.006843391895294189, 0.006760447978973389, 0.006755328178405762, 0.006778880119323731, 0.006690815925598144, 0.006692863941192627, 0.006652927875518798, 0.006653952121734619, 0.006666240215301514, 0.006660096168518067, 0.0066375679969787596, 0.006724607944488525, 0.00674508810043335, 0.006793216228485107, 0.006758399963378906, 0.006758399963378906, 0.006765567779541016, 0.006729728221893311, 0.006829055786132812, 0.014258175849914552, 0.006754303932189941, 0.006767615795135498, 0.006770688056945801, 0.006714367866516113, 0.006759424209594727, 0.006788095951080322, 0.006724607944488525, 0.006732800006866455, 0.006737919807434082, 0.006708223819732666, 0.006724607944488525, 0.006731776237487793, 0.0067010560035705566, 0.0067420158386230465, 0.0067717118263244626, 0.006756351947784424, 0.00674508810043335, 0.0067358717918396, 0.00672870397567749, 0.006722623825073242, 0.006734784126281738, 0.006726655960083008, 0.006713344097137451, 0.00687718391418457, 0.006709248065948486, 0.0067051520347595215, 0.006795263767242431, 0.006717440128326416, 0.00669593620300293, 0.00674508810043335, 0.0067338237762451176, 0.00672051191329956, 0.0067758078575134275, 0.006737919807434082, 0.006726655960083008, 0.006694911956787109, 0.0067358717918396, 0.006706175804138184, 0.006700032234191895, 0.006766592025756836, 0.0067348480224609375, 0.0067348480224609375, 0.006829055786132812, 0.0067276802062988285, 0.006977536201477051, 0.006985727787017822, 0.007014400005340577, 0.0069816322326660156, 0.006931456089019775, 0.006980607986450196, 0.006937600135803222, 0.006938623905181885, 0.0069324798583984375, 0.006961152076721191, 0.006959104061126709, 0.006976511955261231, 0.0069847040176391605, 0.006971392154693603, 0.0069550080299377445, 0.006982656002044678, 0.007070720195770264, 0.007047167778015137, 0.014191616058349609, 0.0067123198509216305, 0.006912000179290771, 0.006758399963378906, 0.0068055038452148435, 0.006760447978973389, 0.006781951904296875, 0.006699007987976074, 0.0068280320167541505, 0.006811647891998291, 0.00673689603805542, 0.006723584175109864, 0.006774784088134766, 0.006723584175109864, 0.006739967823028564, 0.0067358717918396, 0.006739967823028564, 0.006717440128326416, 0.006739967823028564, 0.0067051520347595215, 0.006780928134918213, 0.006706175804138184, 0.006874112129211426, 0.006763519763946534, 0.006729728221893311, 0.006726655960083008, 0.006752255916595459, 0.0067010560035705566, 0.006756351947784424, 0.006718463897705078, 0.006817791938781738, 0.007201791763305664, 0.006953983783721924, 0.007018496036529541, 0.006915071964263916, 0.006966271877288818, 0.006953983783721924, 0.006956031799316406, 0.006920191764831543, 0.00694374418258667, 0.00709939193725586, 0.006912000179290771, 0.006710271835327148, 0.006819839954376221, 0.006730751991271973, 0.006739967823028564, 0.0067276802062988285, 0.006729728221893311, 0.006692863941192627, 0.0067051520347595215, 0.0067358717918396, 0.006722559928894043, 0.006740992069244385, 0.006744063854217529, 
0.006699007987976074, 0.006892543792724609, 0.006709248065948486, 0.006807551860809326, 0.0067983360290527345, 0.006761472225189209, 0.006715392112731934, 0.00672051191329956, 0.006797311782836914, 0.014218239784240723, 0.0067358717918396, 0.0069621758460998535, 0.007168000221252442, 0.007048192024230957, 0.007046144008636474, 0.006990848064422607, 0.006974463939666748, 0.006982656002044678, 0.006985727787017822, 0.006969344139099121, 0.006968319892883301, 0.006947840213775635, 0.0068249602317810056, 0.006723584175109864, 0.0067348480224609375, 0.0067276802062988285, 0.006848512172698974, 0.00674508810043335, 0.0067051520347595215, 0.006748223781585693, 0.006714303970336914, 0.0067276802062988285, 0.006804480075836182, 0.006754303932189941, 0.0067758078575134275, 0.006685696125030518, 0.006730751991271973, 0.006726655960083008, 0.0067276802062988285, 0.006749216079711914, 0.006711264133453369, 0.006721536159515381, 0.00672870397567749, 0.006696959972381592, 0.006729728221893311, 0.006738944053649902, 0.0067358717918396, 0.006718463897705078, 0.006738944053649902, 0.00674508810043335, 0.0067010560035705566, 0.00672051191329956, 0.0068055038452148435, 0.0067758078575134275, 0.006834176063537598, 0.00673689603805542, 0.0067051520347595215, 0.006752255916595459, 0.006779903888702392, 0.006749184131622315, 0.006716415882110595, 0.006848512172698974, 0.006749184131622315, 0.00676966381072998, 0.006834176063537598, 0.006716415882110595, 0.006725632190704346, 0.00672054386138916, 0.0067338237762451176, 0.006743008136749267, 0.0067686400413513184, 0.006751232147216797, 0.014236672401428223, 0.006725632190704346, 0.0067348480224609375, 0.006715456008911133, 0.006762432098388672, 0.006743040084838867, 0.006689792156219483, 0.006709248065948486, 0.006738944053649902, 0.006714431762695312, 0.006792128086090088, 0.0067686400413513184, 0.006730751991271973, 0.00672870397567749, 0.006724607944488525, 0.006722559928894043, 0.006778880119323731, 0.006765567779541016, 0.006713344097137451, 0.006707200050354004, 0.006724607944488525, 0.006759424209594727, 0.006701087951660157, 0.006740960121154785, 0.006726687908172608, 0.0067266240119934085, 0.006749184131622315, 0.006730751991271973, 0.006681600093841553, 0.006766592025756836, 0.006635519981384277, 0.006624256134033203, 0.006654975891113281, 0.006649856090545654, 0.006687744140625, 0.006709248065948486, 0.006715392112731934, 0.006750207901000976, 0.006752255916595459, 0.006715392112731934, 0.006703104019165039, 0.006806528091430664, 0.006752255916595459, 0.006755328178405762, 0.006747136116027832, 0.006730751991271973, 0.006707200050354004, 0.006713344097137451, 0.006737919807434082, 0.006721536159515381, 0.0067348480224609375, 0.006743040084838867, 0.006743040084838867, 0.006818816184997558, 0.0067358717918396, 0.006817791938781738, 0.006703104019165039, 0.006765567779541016, 0.006790143966674805, 0.0066938881874084475, 0.006729728221893311, 0.006721536159515381, 0.006699007987976074, 0.01420083236694336, 0.00677785587310791, 0.006897664070129395, 0.006767615795135498, 0.0068392958641052244, 0.00672051191329956, 0.006792191982269287, 0.0068351998329162595, 0.006717504024505615, 0.006688704013824463, 0.006752255916595459, 0.006723584175109864, 0.0067870721817016606, 0.00678604793548584, 0.0068884482383728025, 0.007054336071014404, 0.006791168212890625, 0.006876160144805908, 0.007169023990631103, 0.006994944095611572, 0.006714367866516113, 0.006750207901000976, 0.006793216228485107, 0.006711296081542969, 0.006751232147216797, 0.00672870397567749, 
0.00667955207824707, 0.006749184131622315, 0.006797311782836914, 0.006726655960083008, 0.006689792156219483, 0.006719488143920899, 0.006754303932189941, 0.0067010560035705566, 0.006744063854217529, 0.00672051191329956, 0.006690815925598144, 0.006833151817321777, 0.0067276802062988285, 0.006732800006866455, 0.00674508810043335, 0.006746111869812011, 0.006723584175109864, 0.006754303932189941, 0.00673689603805542, 0.006783008098602295, 0.006755296230316162, 0.006714367866516113, 0.006714367866516113, 0.006867968082427979, 0.006754303932189941, 0.006830143928527832, 0.006731711864471436, 0.006782976150512696, 0.0067717118263244626, 0.006706175804138184, 0.00684441614151001, 0.006978559970855713, 0.006996992111206054, 0.006918144226074219, 0.006971392154693603, 0.0068853759765625, 0.006964223861694336, 0.015036416053771973, 0.007065599918365479, 0.006990848064422607, 0.007023615837097168, 0.006948863983154297, 0.006990848064422607, 0.006958079814910889, 0.007014400005340577, 0.00693452787399292, 0.006964223861694336, 0.0069253120422363285, 0.006949888229370117, 0.006964223861694336, 0.006959104061126709, 0.006947840213775635, 0.006916096210479736, 0.006853631973266602, 0.006752255916595459, 0.006746111869812011, 0.006703104019165039, 0.006756351947784424, 0.006773759841918945, 0.00672051191329956, 0.006751232147216797, 0.00672051191329956, 0.0067123198509216305, 0.006843391895294189, 0.006763519763946534, 0.006758399963378906, 0.006754303932189941, 0.0067358717918396, 0.006692863941192627, 0.006746111869812011, 0.00674508810043335, 0.006715392112731934, 0.00674508810043335, 0.006738944053649902, 0.006743040084838867, 0.006750207901000976, 0.00672979211807251, 0.00669484806060791, 0.006716415882110595, 0.006749184131622315, 0.006749184131622315, 0.006710271835327148, 0.006737919807434082, 0.006803455829620361, 0.0067051520347595215, 0.006708223819732666, 0.006780928134918213, 0.006688767910003662, 0.0067348480224609375, 0.006717440128326416, 0.006715392112731934, 0.006817791938781738, 0.0067420158386230465, 0.0067870721817016606, 0.006800384044647217, 0.006766592025756836, 0.006739999771118164, 0.006708191871643066, 0.006755328178405762, 0.006746111869812011, 0.014143487930297852, 0.006750207901000976, 0.006739967823028564, 0.006726655960083008, 0.006748159885406494, 0.0067246718406677244, 0.006729663848876953, 0.0067717118263244626, 0.006810624122619629, 0.006706175804138184, 0.006749184131622315, 0.00673689603805542, 0.006711296081542969, 0.0067420158386230465, 0.006732800006866455, 0.006719488143920899, 0.0067645440101623535, 0.006759424209594727, 0.006724607944488525, 0.00672870397567749, 0.006749184131622315, 0.006729728221893311, 0.006677504062652588, 0.006738944053649902, 0.006686751842498779, 0.0066969280242919925, 0.006802432060241699, 0.006756351947784424, 0.007007232189178467, 0.007004159927368164, 0.006947840213775635, 0.007006207942962647, 0.006967296123504638, 0.006986752033233643, 0.006968319892883301, 0.0070553598403930665, 0.006969344139099121, 0.006975488185882568, 0.006986752033233643, 0.0069632000923156735, 0.006961152076721191, 0.006985727787017822, 0.006971392154693603, 0.007068672180175781, 0.006986752033233643, 0.006994944095611572, 0.006972415924072266, 0.006941696166992187, 0.006953983783721924, 0.006989823818206787, 0.006973440170288086, 0.006994944095611572, 0.006929408073425293, 0.006985727787017822, 0.006967296123504638, 0.0070522880554199216, 0.006951935768127441, 0.007003136157989502, 0.006975488185882568, 0.006987775802612305, 0.006959104061126709, 
0.00694374418258667, 0.00694271993637085, 0.014244864463806153, 0.006744063854217529, 0.006746111869812011, 0.0067276802062988285, 0.006718463897705078, 0.006841343879699707, 0.006713344097137451, 0.00678604793548584, 0.006906879901885986, 0.006791168212890625, 0.006716415882110595, 0.006849535942077637, 0.006762495994567871, 0.006692863941192627, 0.006747136116027832, 0.006708223819732666, 0.006703104019165039, 0.00679423999786377, 0.006711296081542969, 0.006789120197296142, 0.006779903888702392, 0.00676358413696289, 0.0067491202354431155, 0.006688767910003662, 0.0067420477867126466, 0.00675222396850586, 0.006725632190704346, 0.006752255916595459, 0.0067420158386230465, 0.006717504024505615, 0.006833087921142578, 0.007392255783081054, 0.007624703884124756, 0.007074816226959229, 0.00697654390335083, 0.007215072154998779, 0.006989823818206787, 0.006999040126800537, 0.007142399787902832, 0.006996992111206054, 0.006988800048828125, 0.006978559970855713, 0.006941696166992187, 0.006982656002044678, 0.00693555212020874, 0.006973440170288086, 0.006958079814910889, 0.006959104061126709, 0.006960127830505371, 0.0069959678649902345, 0.0069632000923156735, 0.006716415882110595, 0.006729728221893311, 0.0067062082290649415, 0.006729695796966553, 0.0067348480224609375, 0.006683648109436035, 0.006800384044647217, 0.006818816184997558, 0.006718463897705078, 0.006850560188293457, 0.0067420158386230465, 0.006748159885406494, 0.014536704063415527, 0.006968319892883301, 0.006966271877288818, 0.00695091199874878, 0.00704204797744751, 0.006987775802612305, 0.006970367908477783, 0.006851583957672119, 0.006754303932189941, 0.0068884482383728025, 0.006730751991271973, 0.006762495994567871, 0.006744063854217529, 0.006726655960083008, 0.00675328016281128, 0.006760447978973389, 0.006747136116027832, 0.006714367866516113, 0.006825984001159668, 0.006810624122619629, 0.006713344097137451, 0.006802432060241699, 0.0073820161819458, 0.0069918718338012695, 0.007038976192474365, 0.007009280204772949, 0.006915071964263916, 0.006946815967559815, 0.0069027838706970214, 0.006979584217071533, 0.007037951946258545, 0.0069847040176391605, 0.007095295906066895, 0.006979584217071533, 0.006982656002044678, 0.006957056045532227, 0.006939648151397705, 0.007016448020935059, 0.007017471790313721, 0.006959104061126709, 0.007007232189178467, 0.006953983783721924, 0.006958079814910889, 0.006985727787017822, 0.007005184173583984, 0.00695091199874878, 0.0069959678649902345, 0.006952960014343262, 0.006946815967559815, 0.006927360057830811, 0.006949888229370117, 0.006945792198181152, 0.006974463939666748, 0.007023615837097168, 0.006964223861694336, 0.006966271877288818, 0.006936575889587402, 0.007049215793609619, 0.007016448020935059, 0.006938623905181885, 0.006959104061126709, 0.006936575889587402, 0.006927360057830811, 0.014215167999267577, 0.006761472225189209, 0.006815743923187256, 0.006685696125030518, 0.006874112129211426, 0.006820864200592041, 0.006779903888702392, 0.006732800006866455, 0.006800384044647217, 0.006703104019165039, 0.00673689603805542, 0.006716415882110595, 0.0066979842185974124, 0.006699007987976074, 0.006696959972381592, 0.006717440128326416, 0.006749184131622315, 0.006707200050354004, 0.00672870397567749, 0.006694911956787109, 0.006708223819732666, 0.0066938881874084475, 0.006688767910003662, 0.006703104019165039, 0.006685696125030518, 0.006864895820617676, 0.006916096210479736, 0.0067983360290527345, 0.006809599876403808, 0.006807551860809326, 0.00673689603805542, 0.006708223819732666, 0.006715392112731934, 
0.006714367866516113, 0.00678607988357544, 0.006709216117858886, 0.006750207901000976, 0.0067123198509216305, 0.006722591876983643, 0.006701024055480957, 0.006739007949829102, 0.006764480113983154, 0.006756351947784424, 0.006732800006866455, 0.006713344097137451, 0.006716415882110595, 0.006711296081542969, 0.006681600093841553, 0.006714367866516113, 0.006699007987976074, 0.006694911956787109, 0.006732800006866455, 0.006689792156219483, 0.006703104019165039, 0.0067758078575134275, 0.006743040084838867, 0.006756351947784424, 0.006776832103729248, 0.006760447978973389, 0.006729728221893311, 0.006706175804138184, 0.00673689603805542, 0.00672051191329956, 0.014218239784240723, 0.0067010560035705566, 0.006730751991271973, 0.006715392112731934, 0.006696959972381592, 0.006707200050354004, 0.006708223819732666, 0.006744063854217529, 0.006721536159515381, 0.006673408031463623, 0.006746111869812011, 0.006691840171813965, 0.006708223819732666, 0.006749184131622315, 0.006730751991271973, 0.006725696086883545, 0.006710207939147949, 0.006713344097137451, 0.0067041277885437015, 0.006717440128326416, 0.0067051520347595215, 0.006686719894409179, 0.00669593620300293, 0.006739967823028564, 0.006729728221893311, 0.006884352207183838, 0.0067573761940002445, 0.006692863941192627, 0.006700032234191895, 0.006709248065948486, 0.006714367866516113, 0.006707200050354004, 0.006751232147216797, 0.006714367866516113, 0.0067870721817016606, 0.0067420158386230465, 0.006709248065948486, 0.006677504062652588, 0.0067123198509216305, 0.0067041277885437015, 0.0068351998329162595, 0.006729728221893311, 0.006744063854217529, 0.006694911956787109, 0.006737919807434082, 0.0067348480224609375, 0.00672870397567749, 0.006865920066833496, 0.006730751991271973, 0.0067041277885437015, 0.006690815925598144, 0.0067420158386230465, 0.006709248065948486, 0.006683648109436035, 0.006721536159515381, 0.006699007987976074, 0.006702079772949219, 0.006723584175109864, 0.006755328178405762, 0.0067573761940002445, 0.006694911956787109, 0.006703104019165039, 0.0066826238632202144, 0.014199808120727539, 0.006708223819732666, 0.006726655960083008, 0.006726655960083008, 0.0067051520347595215, 0.006708223819732666, 0.006747136116027832, 0.006862847805023193, 0.00672051191329956, 0.00674508810043335, 0.006713344097137451, 0.006717440128326416, 0.006710271835327148, 0.006726655960083008, 0.006721536159515381, 0.006719488143920899, 0.006707200050354004, 0.006739967823028564, 0.006916096210479736, 0.006788095951080322, 0.006837247848510742, 0.006850560188293457, 0.0070225920677185055, 0.007953407764434815, 0.007171072006225586, 0.006988800048828125, 0.0069632000923156735, 0.007014400005340577, 0.00724889612197876, 0.007015423774719238, 0.006952960014343262, 0.007001088142395019, 0.007005184173583984, 0.006944767951965332, 0.00694374418258667, 0.007057407855987549, 0.006978559970855713, 0.006953983783721924, 0.006904831886291504, 0.006892543792724609, 0.006957056045532227, 0.006884352207183838, 0.006863872051239014, 0.00689356803894043, 0.006924287796020508, 0.006998015880584717, 0.006892543792724609, 0.006951935768127441, 0.007155712127685547, 0.007081984043121338, 0.006960127830505371, 0.007018496036529541, 0.0070522880554199216, 0.007150591850280762, 0.0070860800743103025, 0.0070522880554199216, 0.00693452787399292, 0.007058432102203369, 0.006945792198181152, 0.006958079814910889, 0.006944767951965332, 0.006970367908477783, 0.006941696166992187, 0.014789631843566894, 0.0069632000923156735, 0.006973440170288086, 0.006958079814910889, 0.0069632000923156735, 
0.0069621758460998535, 0.006931456089019775, 0.006985727787017822, 0.006957056045532227, 0.006941696166992187, 0.006929408073425293, 0.006923264026641846, 0.006958079814910889, 0.006957056045532227, 0.006990848064422607, 0.006978559970855713, 0.00698367977142334, 0.006944767951965332, 0.006917119979858399, 0.00695091199874878, 0.006904831886291504, 0.006956031799316406, 0.006967296123504638, 0.006953983783721924, 0.006938623905181885, 0.006903808116912841, 0.006965248107910156, 0.006949888229370117, 0.00693555212020874, 0.006973440170288086, 0.006994944095611572, 0.006939648151397705, 0.0069816322326660156, 0.006960127830505371, 0.006980607986450196, 0.0070563840866088865, 0.006985727787017822, 0.006752287864685059, 0.0067337918281555175, 0.006853695869445801, 0.006733759880065918, 0.006716415882110595, 0.006744063854217529, 0.006700032234191895, 0.006732800006866455, 0.006743040084838867, 0.006779903888702392, 0.007061503887176514, 0.007062528133392334, 0.006999040126800537, 0.006979584217071533, 0.006915071964263916, 0.006968319892883301, 0.00692633581161499, 0.0069283838272094726, 0.006938623905181885, 0.00801689624786377, 0.007638016223907471, 0.00704307222366333, 0.007014400005340577, 0.006941696166992187, 0.0070225920677185055, 0.007076863765716553, 0.014768128395080566, 0.006933504104614257, 0.006961152076721191, 0.006966271877288818, 0.006978559970855713, 0.006912000179290771, 0.0067358717918396, 0.006732800006866455, 0.0066979842185974124, 0.006715392112731934, 0.006730751991271973, 0.006721536159515381, 0.006842368125915528, 0.00672870397567749, 0.006711296081542969, 0.006741055965423584, 0.00674399995803833, 0.0067758078575134275, 0.006959104061126709, 0.006964223861694336, 0.006971392154693603, 0.006915135860443115, 0.006940608024597168, 0.006872064113616944, 0.0069959678649902345, 0.007156735897064209, 0.006993919849395752, 0.006974463939666748, 0.006920191764831543, 0.006889472007751465, 0.00695091199874878, 0.006952960014343262, 0.00695091199874878, 0.006915071964263916, 0.0070830078125, 0.006978559970855713, 0.006952960014343262, 0.0069632000923156735, 0.006980607986450196, 0.007064576148986816, 0.0069918718338012695, 0.006924287796020508, 0.006948863983154297, 0.006966271877288818, 0.006948863983154297, 0.00679423999786377, 0.006752319812774658, 0.006724544048309326, 0.006639616012573242, 0.0066641921997070315, 0.006675456047058105, 0.006716415882110595, 0.006661119937896728, 0.006628352165222168, 0.006649856090545654, 0.006746111869812011, 0.006722559928894043, 0.006972415924072266, 0.006773759841918945, 0.0067686400413513184, 0.006724607944488525, 0.006721536159515381, 0.006733888149261474, 0.014669759750366211, 0.007174143791198731, 0.007038976192474365, 0.006959104061126709, 0.006977536201477051, 0.007017471790313721, 0.006961152076721191, 0.006960127830505371, 0.007027711868286133, 0.00693452787399292, 0.007000063896179199, 0.006951935768127441, 0.006958079814910889, 0.006951935768127441, 0.006952960014343262, 0.006959104061126709, 0.006951935768127441, 0.006958079814910889, 0.0069816322326660156, 0.006946815967559815, 0.006924287796020508, 0.00698367977142334, 0.006944767951965332, 0.006986752033233643, 0.006969344139099121, 0.006968319892883301, 0.006915103912353515, 0.0069242558479309085, 0.006949888229370117, 0.007111680030822754, 0.007015423774719238, 0.006973440170288086, 0.006959104061126709, 0.007012351989746094, 0.006980607986450196, 0.006960127830505371, 0.00693555212020874, 0.006985727787017822, 0.006936575889587402, 0.006973440170288086, 
0.006958079814910889, 0.0069959678649902345, 0.006958079814910889, 0.006933504104614257, 0.007006207942962647, 0.006975552082061767, 0.006967232227325439, 0.006929408073425293, 0.006957056045532227, 0.006936575889587402, 0.006938623905181885, 0.006901792049407959, 0.0069539518356323245, 0.006959104061126709, 0.006945792198181152, 0.006958079814910889, 0.007050240039825439, 0.006960127830505371, 0.00695091199874878, 0.006949888229370117, 0.006945792198181152, 0.006960127830505371, 0.006976511955261231, 0.014671872138977051, 0.00683622407913208, 0.006738944053649902, 0.006726655960083008, 0.006732800006866455, 0.006713344097137451, 0.006724607944488525, 0.006717440128326416, 0.00672051191329956, 0.006718463897705078, 0.006726655960083008, 0.006715392112731934, 0.006681600093841553, 0.006816768169403077, 0.006746111869812011, 0.006716415882110595, 0.006724607944488525, 0.00672051191329956, 0.006711296081542969, 0.006715392112731934, 0.0067123198509216305, 0.006715392112731934, 0.006703104019165039, 0.00672870397567749, 0.0068392958641052244, 0.0067348480224609375, 0.006721536159515381, 0.006716415882110595, 0.006694911956787109, 0.006722559928894043, 0.0067338237762451176, 0.006718463897705078, 0.0066979842185974124, 0.006731776237487793, 0.006715392112731934, 0.006724607944488525, 0.006729728221893311, 0.00672870397567749, 0.0066938881874084475, 0.006746111869812011, 0.006687744140625, 0.006737919807434082, 0.0067010560035705566, 0.0067041277885437015, 0.007002111911773682, 0.006776832103729248, 0.006823935985565186, 0.00672870397567749, 0.0067983360290527345, 0.006721536159515381, 0.006727744102478028, 0.006747072219848633, 0.007356416225433349, 0.006988800048828125, 0.006908927917480469, 0.0069304962158203125, 0.006955967903137207, 0.007030784130096435, 0.00693452787399292, 0.006971392154693603, 0.00694374418258667, 0.007005184173583984, 0.006920191764831543, 0.014754816055297852, 0.00694374418258667, 0.006967296123504638, 0.006953983783721924, 0.007035903930664063, 0.006876160144805908, 0.0067123198509216305, 0.006750207901000976, 0.0067276802062988285, 0.0067276802062988285, 0.006765567779541016, 0.006766592025756836, 0.006710271835327148, 0.006762495994567871, 0.006740992069244385, 0.006776832103729248, 0.006737919807434082, 0.0067276802062988285, 0.006760447978973389, 0.006696959972381592, 0.006744063854217529, 0.006744063854217529, 0.006723584175109864, 0.0068076162338256835, 0.006731711864471436, 0.0067358717918396, 0.006700064182281494, 0.006725599765777588, 0.006718463897705078, 0.006722591876983643, 0.0067337918281555175, 0.0066938881874084475, 0.006730751991271973, 0.006706175804138184, 0.006865920066833496, 0.006724607944488525, 0.006740992069244385, 0.006723584175109864, 0.0067041277885437015, 0.006717440128326416, 0.006737919807434082, 0.006715392112731934, 0.006823935985565186, 0.006739967823028564, 0.006714367866516113, 0.006762495994567871, 0.0067420477867126466, 0.006771679878234863, 0.006731776237487793, 0.006722559928894043, 0.006687744140625, 0.0067338237762451176, 0.006725632190704346, 0.006715392112731934, 0.006706175804138184, 0.006725632190704346, 0.006715392112731934, 0.006710271835327148, 0.006723584175109864, 0.006709248065948486, 0.006684671878814697, 0.006709248065948486, 0.0067123198509216305, 0.01426643180847168, 0.0067255678176879885, 0.006780928134918213, 0.006709248065948486, 0.006739967823028564, 0.006708223819732666, 0.00673689603805542, 0.006763519763946534, 0.006716415882110595, 0.0067051520347595215, 0.006726655960083008, 0.006707200050354004, 
0.006721536159515381, 0.00673689603805542, 0.0067348480224609375, 0.006732800006866455, 0.006763519763946534, 0.0067123198509216305, 0.006706175804138184, 0.006708223819732666, 0.0067010560035705566, 0.00676966381072998, 0.00679423999786377, 0.006707200050354004, 0.006684671878814697, 0.00669593620300293, 0.006684671878814697, 0.006699007987976074, 0.006675456047058105, 0.006808576107025147, 0.006714367866516113, 0.006790143966674805, 0.00673689603805542, 0.0067686400413513184, 0.006692863941192627, 0.006718463897705078, 0.0067123198509216305, 0.006731776237487793, 0.006737919807434082, 0.006729728221893311, 0.006708223819732666, 0.0067420158386230465, 0.0067358717918396, 0.006760447978973389, 0.00672870397567749, 0.006732800006866455, 0.006722559928894043, 0.006713344097137451, 0.006749184131622315, 0.006750207901000976, 0.006699007987976074, 0.0067420158386230465, 0.006740992069244385, 0.006699007987976074, 0.006751232147216797, 0.006732800006866455, 0.006831103801727295, 0.0068055038452148435, 0.006739967823028564, 0.006724607944488525, 0.006737919807434082, 0.006761504173278809, 0.006738912105560302, 0.01487667179107666, 0.006949888229370117, 0.00692633581161499, 0.006870016098022461, 0.007027711868286133, 0.006979584217071533, 0.0069632000923156735, 0.006959104061126709, 0.007003136157989502, 0.007009280204772949, 0.006994976043701172, 0.006966303825378418, 0.007005119800567627, 0.007029759883880615, 0.006985727787017822, 0.007004159927368164, 0.007013376235961914, 0.007021567821502686, 0.0069847040176391605, 0.007036928176879883, 0.007038976192474365, 0.006982656002044678, 0.006987775802612305, 0.006975488185882568, 0.007032832145690918, 0.006986752033233643, 0.006975488185882568, 0.006973440170288086, 0.006967296123504638, 0.006989823818206787, 0.006965248107910156, 0.006952960014343262, 0.006985727787017822, 0.006965248107910156, 0.006945792198181152, 0.006945824146270752, 0.0069508800506591795, 0.007271423816680909, 0.0069928960800170895, 0.006933504104614257, 0.0069621758460998535, 0.0069847040176391605, 0.006964223861694336, 0.006957056045532227, 0.007038976192474365, 0.006969344139099121, 0.0069816322326660156, 0.00694374418258667, 0.006977536201477051, 0.006970367908477783, 0.006953983783721924, 0.006970367908477783, 0.006973440170288086, 0.00693555212020874, 0.007002111911773682, 0.0070553598403930665, 0.0070256638526916505, 0.006982656002044678, 0.006973440170288086, 0.007006207942962647, 0.006969344139099121, 0.0069918718338012695, 0.006994944095611572]",tokens/s,143.27762777705752,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,togethercomputer/RedPajama-INCITE-Base-7B-v0.1,togethercomputer/RedPajama-INCITE-Base-7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, 
in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-30b,huggyllama/llama-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,togethercomputer/RedPajama-INCITE-Base-3B-v1,togethercomputer/RedPajama-INCITE-Base-3B-v1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights 
self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,mistralai/Mixtral-8x7B-v0.1,mistralai/Mixtral-8x7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File 
""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,r,r,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/r/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir 
_raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3068-45157b6838d0d3151418f9ee;030aac8c-d8bd-4b40-a5f4-d283b04da2f4) Repository Not Found for url: https://huggingface.co/r/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: r is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-12b,EleutherAI/pythia-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-30b,facebook/opt-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-564M,facebook/xglm-564M,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: XGLMForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmp8kvpasad/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,TencentARC/Mistral_Pro_8B_v0.1,TencentARC/Mistral_Pro_8B_v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-base-alpha-7b,stabilityai/stablelm-base-alpha-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-6B,01-ai/Yi-6B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 564, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3907, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 462, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-14B,Qwen/Qwen1.5-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm2-20b,internlm/internlm2-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,.,.,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 370, in cached_file raise EnvironmentError( OSError: . does not appear to have a file named config.json. Checkout 'https://huggingface.co/./tree/None' for available files. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-j-6b,EleutherAI/gpt-j-6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-7.5B,facebook/xglm-7.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: XGLMForCausalLM does not support Flash Attention 2.0 yet. Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmp7nylci1i/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-7b,tiiuae/falcon-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 612, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 625, in resolve_trust_remote_code raise ValueError( ValueError: The repository for tiiuae/falcon-7b contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/tiiuae/falcon-7b. Please pass the argument `trust_remote_code=True` to allow custom code to be run. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/polyglot-ko-12.8b,EleutherAI/polyglot-ko-12.8b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciCoder-1b,Deci/DeciCoder-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 612, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 625, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Deci/DeciCoder-1b contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Deci/DeciCoder-1b. Please pass the argument `trust_remote_code=True` to allow custom code to be run. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,m,m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/m/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2bb8-79fa1b8d2dae85bb706b6108;3d4906e8-2dd2-4cb6-95a9-cf83b651ac83) Repository Not Found for url: https://huggingface.co/m/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: m is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-1.8B,Qwen/Qwen1.5-1.8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-MoE-A2.7B,Qwen/Qwen1.5-MoE-A2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,v,v,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/v/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a33dc-256ebe675acc1be221d5d63a;237505d5-3f0e-4e89-891d-4119be79814f) Repository Not Found for url: https://huggingface.co/v/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: v is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-110B,Qwen/Qwen1.5-110B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-2-12b,stabilityai/stablelm-2-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.3b,EleutherAI/pythia-1.3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-7b-hf,meta-llama/Llama-2-7b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciLM-7B,Deci/DeciLM-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 613, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 419, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 626, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Deci/DeciLM-7B contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Deci/DeciLM-7B. Please pass the argument `trust_remote_code=True` to allow custom code to be run. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-0.5B,Qwen/Qwen1.5-0.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,0,0,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/0/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a3435-724f780e789092314623be17;d29e2769-bdff-49d8-8a37-b33fd047c9a8) Repository Not Found for url: https://huggingface.co/0/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 0 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-70b-hf,meta-llama/Llama-2-70b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,2,2,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/2/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 
404 Client Error. (Request ID: Root=1-667a32d9-4eaeeb296ee249bb457d6e4c;944d9640-fb6f-4590-a93a-48080e8238e6) Repository Not Found for url: https://huggingface.co/2/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 2 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-2.7b,facebook/opt-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,l,l,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/l/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File 
""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3129-2caca5d74efd4fb42d624037;c38f23c2-713a-4de2-b9ff-d033f820dce9) Repository Not Found for url: https://huggingface.co/l/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: l is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Meta-Llama-3-70B,meta-llama/Meta-Llama-3-70B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in 
load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-32B,Qwen/Qwen1.5-32B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = 
self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-72B,Qwen/Qwen2-beta-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File 
""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,s,s,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/s/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 
1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2c5e-6be5d95239acef873a89a310;d8481bb7-ef88-4b2f-8352-9089915a2176) Repository Not Found for url: https://huggingface.co/s/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: s is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,B,B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/B/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a332e-0a0110653638cea10d7fb630;cac083d5-7c77-4d56-8109-2dd811f9e92d) Repository Not Found for url: https://huggingface.co/B/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: B is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-14B,Qwen/Qwen2-beta-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,mistralai/Mistral-7B-v0.1,mistralai/Mistral-7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights 
self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-16B-nl,Salesforce/codegen-16B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return 
model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: CodeGenForCausalLM does not support Flash Attention 2.0 yet. Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmp1nkz3rp0/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-2.7b,EleutherAI/pythia-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File 
""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,a,a,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/a/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( 
File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a30d2-50cd0d407831ccb40ebc260e;0a495ab0-6ef7-4443-94ae-b41b1878c0f0) Repository Not Found for url: https://huggingface.co/a/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: a is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-2.7B,EleutherAI/gpt-neo-2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in 
benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-40b,tiiuae/falcon-40b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) 
ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 612, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 625, in resolve_trust_remote_code raise ValueError( ValueError: The repository for tiiuae/falcon-40b contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/tiiuae/falcon-40b. Please pass the argument `trust_remote_code=True` to allow custom code to be run. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,1,1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/1/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a34e1-0952952a394bdc361e9f805a;dcac1ae2-41b0-46bb-8a2b-91eecce2fa8e) Repository Not Found for url: https://huggingface.co/1/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 1 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-base-alpha-3b,stabilityai/stablelm-base-alpha-3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in 
load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,t,t,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/t/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3010-7f585415030da03d6c2f6133;94a8ec74-a6ac-4641-a294-e75ebf8ee8d0) Repository Not Found for url: https://huggingface.co/t/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: t is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neox-20b,EleutherAI/gpt-neox-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in 
load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,/,/,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained 
config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 370, in cached_file raise EnvironmentError( OSError: / does not appear to have a file named config.json. Checkout 'https://huggingface.co///tree/None' for available files. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-2b,google/recurrentgemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request 
ID: Root=1-667cc3d1-55707e21768813816ea064d3;1c112b8e-4248-4f39-827b-6ded07ae1b7d) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/recurrentgemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-72B,Qwen/Qwen1.5-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,-,-,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 106, in _inner_fn validate_repo_id(arg_value) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 160, in validate_repo_id raise HFValidationError( huggingface_hub.errors.HFValidationError: Repo id must use alphanumeric chars or '-', '_', '.', '--' and '..' are forbidden, '-' and '.' cannot start or end the name, max length is 96: '-'. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 463, in cached_file raise EnvironmentError( OSError: Incorrect path_or_model_id: '-'. Please provide either the path to a local folder or the repo_id of a model on the Hub. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-6B-nl,Salesforce/codegen-6B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: CodeGenForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpu84js1xc/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-7b,google/recurrentgemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667cc427-5841df6b0bedde3d09b0ea65;d38dc8ed-491f-4196-8450-e3c6c2c7d386) Repository Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: google/recurrentgemma-7b is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-13b,huggyllama/llama-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-2b,google/gemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc325-1c038d03304f81627d85c30a;45d89580-5004-49da-8e7b-3a079cd5bf4e) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/gemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-7B,Qwen/Qwen-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 612, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 625, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Qwen/Qwen-7B contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Qwen/Qwen-7B. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-13b-hf,meta-llama/Llama-2-13b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-4B,Qwen/Qwen1.5-4B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-410m,EleutherAI/pythia-410m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-7b,huggyllama/llama-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-125m,EleutherAI/gpt-neo-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,microsoft/phi-1_5,microsoft/phi-1_5,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-14B,Qwen/Qwen-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 612, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 625, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Qwen/Qwen-14B contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Qwen/Qwen-14B. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-65b,huggyllama/llama-65b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 86.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-7b,google/gemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc37c-1f597c325f5488e14d1ffd02;b76cd793-6465-4356-8f0c-15a25b4e0b8d) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/gemma-7b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-7b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,i,i,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/i/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 
Client Error. (Request ID: Root=1-667a2f61-0776fe7c79233d685e39e6ef;fb78a602-08c2-49cc-81f8-23c4d0ea1d58) Repository Not Found for url: https://huggingface.co/i/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: i is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-6.7b,facebook/opt-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-34B,01-ai/Yi-34B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights 
self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 564, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3907, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 466, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-160m,EleutherAI/pythia-160m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return 
model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,M,M,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/M/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2f09-20ea90467ee3805a7a7889b9;39569623-5732-4b29-b1b3-fff77aa722cd) Repository Not Found for url: https://huggingface.co/M/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: M is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,openai-community/gpt2-large,openai-community/gpt2-large,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.1,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1273.511936,2645.03296,0.0,1998.585856,1692.285952,s,10,0.19512889289855956,0.019512889289855957,0.0006973531616019884,0.019118751525878906,0.02076168899536133,0.020845900917053222,0.020913270454406736,"[0.02074297523498535, 0.019069087982177733, 0.01902137565612793, 0.019066047668457032, 0.01907967948913574, 0.0189399356842041, 0.01915782356262207, 0.019729055404663087, 0.019392799377441407, 0.020930112838745116]",tokens/s,13119.533258105714,kWh,2.258438427837786e-07,1.2375136383443654e-07,6.813650022829518e-07,1.030960208901167e-06,tokens/kWh,248312202.3427593,MB,1273.806848,2645.03296,0.0,1998.585856,1740.085248,s,10,11.767407348632812,1.1767407348632815,0.01424671134639797,1.1756907958984375,1.1905447143554688,1.2001551696777344,1.2078435339355469,"[1.209765625, 1.1791375732421876, 1.183642578125, 1.159637939453125, 1.166791015625, 1.1595640869140624, 1.17467822265625, 1.169077880859375, 1.1884090576171875, 1.176703369140625]",tokens/s,53.537706423768526,kWh,1.3865471741266508e-05,7.59789077589712e-06,2.9375964599116e-05,5.083932711627962e-05,tokens/kWh,1239198.1478414636,,s,629,11.921181676864638,0.018952594080865855,0.002352548224385844,0.0184770565032959,0.01912931900024414,0.01946992607116699,0.03775201217651367,"[0.020113407135009767, 0.019819520950317384, 0.020000768661499024, 0.02002943992614746, 0.020170751571655272, 0.019966976165771484, 0.01986867141723633, 0.019780607223510743, 0.019711999893188475, 0.01991372871398926, 0.02002943992614746, 0.019809280395507813, 0.01985024070739746, 0.01979801559448242, 0.01944063949584961, 0.0192225284576416, 0.01923276710510254, 0.019565568923950196, 0.01946316719055176, 0.01927168083190918, 0.019129344940185547, 0.01903308868408203, 0.018916351318359375, 0.019993600845336915, 0.020050943374633787, 0.019573759078979493, 0.01898700714111328, 0.019013631820678712, 0.019124223709106446, 0.0190515193939209, 0.018741247177124023, 0.018388992309570314, 0.018288639068603514, 0.018325504302978517, 0.01842585563659668, 0.018329599380493163, 0.01836031913757324, 0.019708927154541016, 0.019175424575805664, 0.018696191787719727, 0.01881804847717285, 0.019137535095214844, 0.018902015686035157, 0.018919456481933595, 0.018997215270996095, 0.018655231475830078, 0.018349056243896485, 0.018293760299682618, 0.018259967803955078, 0.018803712844848632, 0.018983936309814452, 0.018994176864624023, 0.018919424057006837, 0.01900032043457031, 0.018935808181762694, 0.018883583068847656, 0.018942975997924806, 0.018905088424682616, 0.018944000244140623, 0.018937856674194335, 0.018896896362304686, 0.019002368927001953, 0.038024192810058595, 0.018856960296630858, 0.018953216552734374, 0.01899212837219238, 0.018933759689331055, 0.018946048736572265, 0.018918399810791017, 0.018514944076538087, 0.018379776000976563, 0.01847603225708008, 0.018405376434326173, 
0.01839411163330078, 0.01844428825378418, 0.018465791702270508, 0.018507776260375978, 0.01843404769897461, 0.01840025520324707, 0.018305023193359374, 0.018584575653076172, 0.018944000244140623, 0.01904128074645996, 0.018902015686035157, 0.018913280487060546, 0.018897920608520507, 0.01900441551208496, 0.018922496795654296, 0.018920448303222655, 0.018958335876464845, 0.01924198341369629, 0.018942975997924806, 0.018929664611816405, 0.01883750343322754, 0.018920448303222655, 0.018919424057006837, 0.01887539291381836, 0.018379776000976563, 0.018397184371948243, 0.019178495407104493, 0.018910207748413087, 0.018939903259277344, 0.01884569549560547, 0.018928640365600585, 0.018945024490356444, 0.018886655807495118, 0.01885798454284668, 0.019137535095214844, 0.019356672286987304, 0.018903039932250978, 0.01864806365966797, 0.01850060844421387, 0.018481151580810547, 0.018380800247192384, 0.018301952362060548, 0.018478080749511717, 0.018493440628051756, 0.01842483139038086, 0.0183767032623291, 0.018386943817138672, 0.018413568496704103, 0.018397184371948243, 0.018351104736328124, 0.01844633674621582, 0.01836851119995117, 0.03775590515136719, 0.018353151321411132, 0.01845043182373047, 0.018341888427734376, 0.018726911544799805, 0.018123775482177733, 0.018150400161743165, 0.018366464614868162, 0.01886310386657715, 0.01907711982727051, 0.018974720001220705, 0.01900444793701172, 0.01890505599975586, 0.018916351318359375, 0.018966527938842775, 0.019076095581054688, 0.018861055374145508, 0.01881497573852539, 0.019140607833862306, 0.019045408248901368, 0.018921440124511718, 0.018963455200195312, 0.018877439498901367, 0.01926144027709961, 0.019165184020996092, 0.018880512237548826, 0.018881536483764647, 0.018955263137817382, 0.018960384368896483, 0.018936832427978514, 0.018729984283447267, 0.018342912673950194, 0.01841459274291992, 0.018329599380493163, 0.01839206314086914, 0.018340864181518556, 0.01840025520324707, 0.018534400939941405, 0.018990079879760743, 0.01900339126586914, 0.018896896362304686, 0.018963455200195312, 0.018910207748413087, 0.01887846374511719, 0.018913280487060546, 0.01883340835571289, 0.018333696365356447, 0.018703359603881836, 0.018351104736328124, 0.01845452880859375, 0.0182794246673584, 0.018318336486816408, 0.01838591957092285, 0.018298879623413086, 0.018270240783691407, 0.019394527435302733, 0.01943961524963379, 0.020576255798339844, 0.019310592651367187, 0.018993152618408202, 0.01903206443786621, 0.018950143814086915, 0.019017728805541992, 0.0377446403503418, 0.018357248306274415, 0.01839820861816406, 0.018373632431030275, 0.018353151321411132, 0.0184453125, 0.018336767196655272, 0.01846784019470215, 0.018374656677246092, 0.018431039810180665, 0.018341823577880858, 0.018308095932006836, 0.018556928634643553, 0.01840127944946289, 0.018554880142211915, 0.0184770565032959, 0.01844428825378418, 0.018405376434326173, 0.018333696365356447, 0.01840947151184082, 0.01842278480529785, 0.01840127944946289, 0.018413568496704103, 0.018343967437744142, 0.018638816833496094, 0.018471935272216796, 0.01844223976135254, 0.018472959518432617, 0.018349056243896485, 0.018336767196655272, 0.01837772750854492, 0.018337791442871093, 0.01840742492675781, 0.01838083267211914, 0.018362335205078125, 0.018448383331298827, 0.01839820861816406, 0.01841459274291992, 0.018397184371948243, 0.018324480056762696, 0.018363391876220703, 0.018415615081787108, 0.018408447265625, 0.018379776000976563, 0.018749439239501953, 0.01843404769897461, 0.018365440368652345, 0.018324480056762696, 0.018372608184814454, 
0.018328575134277342, 0.01844223976135254, 0.018338815689086914, 0.018361343383789062, 0.01836031913757324, 0.01841152000427246, 0.018314239501953124, 0.018345983505249023, 0.01840332794189453, 0.018328575134277342, 0.018373632431030275, 0.01841663932800293, 0.018287616729736327, 0.018552831649780274, 0.037754878997802735, 0.018325504302978517, 0.01840025520324707, 0.018291711807250977, 0.018345983505249023, 0.018326528549194337, 0.01842995262145996, 0.018415615081787108, 0.01841971206665039, 0.01842278480529785, 0.018359296798706053, 0.01837772750854492, 0.018334720611572267, 0.018315263748168945, 0.01840947151184082, 0.018386943817138672, 0.0183767032623291, 0.018520063400268554, 0.018358272552490236, 0.018396160125732423, 0.018333696365356447, 0.01839206314086914, 0.018307071685791015, 0.018312192916870116, 0.018371583938598633, 0.018345983505249023, 0.01841663932800293, 0.018457599639892578, 0.01835212707519531, 0.018354175567626953, 0.01828659248352051, 0.018344959259033202, 0.018364416122436524, 0.018355199813842774, 0.018298879623413086, 0.01835212707519531, 0.018412544250488282, 0.018366464614868162, 0.018382848739624022, 0.018255872726440428, 0.018357248306274415, 0.01836953544616699, 0.01902284812927246, 0.020344831466674804, 0.01921331214904785, 0.018961408615112304, 0.018908159255981445, 0.018890752792358398, 0.018924543380737305, 0.019082239151000976, 0.01884364891052246, 0.018367488861083983, 0.019126272201538085, 0.018917375564575196, 0.018936832427978514, 0.01902284812927246, 0.0184268798828125, 0.018364416122436524, 0.018391040802001952, 0.01841049575805664, 0.018320383071899413, 0.018339839935302735, 0.018349056243896485, 0.0375838737487793, 0.018310144424438478, 0.01836031913757324, 0.018404352188110353, 0.018507776260375978, 0.018515968322753908, 0.018348031997680665, 0.018380800247192384, 0.01826201629638672, 0.018463775634765624, 0.018397151947021486, 0.018396160125732423, 0.018347007751464844, 0.018324480056762696, 0.01841459274291992, 0.018471935272216796, 0.018512895584106445, 0.01836031913757324, 0.018316287994384766, 0.018356224060058594, 0.018326528549194337, 0.01836953544616699, 0.018387968063354493, 0.018312192916870116, 0.018323455810546875, 0.018404352188110353, 0.01864806365966797, 0.018514944076538087, 0.018299903869628906, 0.018319360733032225, 0.018310144424438478, 0.018258943557739257, 0.018485248565673826, 0.018364416122436524, 0.018373632431030275, 0.018423807144165038, 0.018341888427734376, 0.018431999206542968, 0.018876415252685547, 0.018487295150756835, 0.01843302345275879, 0.018374656677246092, 0.0185743350982666, 0.01837772750854492, 0.018381824493408205, 0.018363391876220703, 0.01838387107849121, 0.01839923286437988, 0.018378751754760742, 0.01836031913757324, 0.01839411163330078, 0.018412544250488282, 0.018408447265625, 0.018473983764648438, 0.018364416122436524, 0.01840947151184082, 0.018310144424438478, 0.01839206314086914, 0.01851087951660156, 0.018360288619995117, 0.01839206314086914, 0.01836031913757324, 0.01843097686767578, 0.037787647247314454, 0.018329599380493163, 0.01837772750854492, 0.018396160125732423, 0.018449407577514648, 0.01841152000427246, 0.018397184371948243, 0.018374656677246092, 0.018320383071899413, 0.018408447265625, 0.018351104736328124, 0.01837772750854492, 0.018423807144165038, 0.018333696365356447, 0.01864908790588379, 0.018455551147460936, 0.018397184371948243, 0.018354175567626953, 0.018366464614868162, 0.018356224060058594, 0.018347007751464844, 0.01841152000427246, 0.019151872634887695, 0.019474431991577147, 
0.019180543899536134, 0.018968576431274413, 0.01903923225402832, 0.018941951751708985, 0.018888704299926756, 0.0189040641784668, 0.018905088424682616, 0.018907136917114258, 0.019160064697265625, 0.01887027168273926, 0.018877439498901367, 0.01868185615539551, 0.018921472549438476, 0.01904025650024414, 0.018936832427978514, 0.018332672119140626, 0.018308095932006836, 0.01839411163330078, 0.018347007751464844, 0.018311168670654295, 0.0183767032623291, 0.01841459274291992, 0.018522111892700196, 0.018931711196899414, 0.019349504470825195, 0.01902284812927246, 0.018991104125976564, 0.018930688858032226, 0.018989055633544923, 0.019001344680786132, 0.018495487213134765, 0.018301952362060548, 0.01869004821777344, 0.01825484848022461, 0.01839206314086914, 0.01840230369567871, 0.01839820861816406, 0.018663423538208008, 0.019091455459594727, 0.038542335510253906, 0.018413568496704103, 0.018481151580810547, 0.018983936309814452, 0.01899519920349121, 0.018893823623657227, 0.018886655807495118, 0.01880575942993164, 0.018921472549438476, 0.018318336486816408, 0.018509855270385744, 0.018419679641723634, 0.018356224060058594, 0.01843404769897461, 0.018962432861328125, 0.01904435157775879, 0.01921843147277832, 0.018980863571166993, 0.01903923225402832, 0.018388992309570314, 0.01842790412902832, 0.01837772750854492, 0.018372608184814454, 0.018331647872924805, 0.01839411163330078, 0.018319360733032225, 0.018305023193359374, 0.018293760299682618, 0.018367488861083983, 0.01842483139038086, 0.018390016555786134, 0.018357280731201173, 0.018384864807128906, 0.01839411163330078, 0.018364416122436524, 0.018327552795410155, 0.01839308738708496, 0.018328575134277342, 0.01826918411254883, 0.0184268798828125, 0.018395135879516602, 0.018295808792114256, 0.018373632431030275, 0.018151424407958985, 0.018207744598388673, 0.01836031913757324, 0.018295808792114256, 0.018780160903930664, 0.018881536483764647, 0.018906112670898437, 0.018965503692626954, 0.018974752426147462, 0.018904031753540038, 0.018936832427978514, 0.018956287384033203, 0.018965503692626954, 0.01840332794189453, 0.018310144424438478, 0.018336767196655272, 0.018404352188110353, 0.018361343383789062, 0.01839923286437988, 0.018463743209838866, 0.038046718597412106, 0.01843507194519043, 0.018305023193359374, 0.018233343124389647, 0.018344959259033202, 0.018366464614868162, 0.018495487213134765, 0.018343967437744142, 0.018420703887939455, 0.01840230369567871, 0.018654207229614257, 0.020091903686523437, 0.019406848907470704, 0.01906073570251465, 0.0190515193939209, 0.018997247695922852, 0.018948095321655273, 0.019203071594238282, 0.01926655960083008, 0.01898700714111328, 0.018949119567871094, 0.01903104019165039, 0.01902079963684082, 0.018894847869873048, 0.018984960556030273, 0.018909183502197266, 0.018982912063598634, 0.019183616638183593, 0.019070976257324217, 0.01901875114440918, 0.018957311630249024, 0.018975744247436522, 0.018861055374145508, 0.018933759689331055, 0.01906073570251465, 0.018932735443115235, 0.018926591873168946, 0.018968576431274413, 0.01884671974182129, 0.018888704299926756, 0.018856960296630858, 0.018906112670898437, 0.018761728286743166, 0.019104768753051758, 0.019098623275756836, 0.01904844856262207, 0.018964479446411133, 0.018893823623657227, 0.018338815689086914, 0.018323455810546875, 0.01824563217163086, 0.018487295150756835, 0.018811904907226562, 0.01941200065612793, 0.01912931251525879, 0.01884364891052246, 0.01884569549560547, 0.018997247695922852, 0.018654207229614257, 0.018766847610473633, 0.01901875114440918, 0.018925567626953126, 
0.018929664611816405, 0.04048076629638672, 0.019142656326293944, 0.018959360122680666, 0.018917375564575196, 0.019141632080078123, 0.018944000244140623, 0.01884876823425293, 0.018808832168579103, 0.018886655807495118, 0.018934783935546876, 0.01899929618835449, 0.018940927505493164, 0.018977792739868164, 0.018918399810791017, 0.01899929618835449, 0.018940927505493164, 0.018861055374145508, 0.018836511611938476, 0.018780128479003906, 0.018320383071899413, 0.01839308738708496, 0.01840127944946289, 0.01827737617492676, 0.018310144424438478, 0.01827840042114258, 0.018487295150756835, 0.018775039672851563, 0.018501632690429686, 0.01841663932800293, 0.018305023193359374, 0.019021823883056642, 0.018941951751708985, 0.018931711196899414, 0.01887027168273926, 0.018334720611572267, 0.018359296798706053, 0.018281471252441405, 0.018289663314819335, 0.018388992309570314, 0.018295808792114256, 0.01823539161682129, 0.01827020835876465, 0.018332672119140626, 0.01830297660827637, 0.01837264060974121, 0.018341888427734376, 0.018376672744750976, 0.01881292724609375, 0.0188221435546875, 0.01887948799133301, 0.018420736312866212, 0.018380800247192384, 0.018301952362060548, 0.018333696365356447, 0.018735103607177735, 0.019014656066894533, 0.01880985641479492, 0.018928640365600585, 0.018937856674194335, 0.018965503692626954, 0.018817024230957033, 0.01887027168273926, 0.018896896362304686]",tokens/s,52.763225748056264,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-70m,EleutherAI/pythia-70m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", 
line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm-20b,internlm/internlm-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 612, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 625, in resolve_trust_remote_code raise ValueError( 
ValueError: The repository for internlm/internlm-20b contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/internlm/internlm-20b. Please pass the argument `trust_remote_code=True` to allow custom code to be run. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Meta-Llama-3-8B,meta-llama/Meta-Llama-3-8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-350m,facebook/opt-350m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-rw-1b,tiiuae/falcon-rw-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 612, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 625, in resolve_trust_remote_code raise ValueError( ValueError: The repository for tiiuae/falcon-rw-1b contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/tiiuae/falcon-rw-1b. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-125m,facebook/opt-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-4.5B,facebook/xglm-4.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: XGLMForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmp3qcxyjvg/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,x,x,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/x/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper 
hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a322e-685433a747aaaf2d0e029c5d;216333ff-d15d-4fbd-8bd2-6ec4232fb801) Repository Not Found for url: https://huggingface.co/x/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: x is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-3b-4e1t,stabilityai/stablelm-3b-4e1t,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File 
""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-2-1_6b,stabilityai/stablelm-2-1_6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-66b,facebook/opt-66b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights 
self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 162.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-180B,tiiuae/falcon-180B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/tiiuae/falcon-180B/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = 
get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667794b9-07ed89fd678545265eda132d;e1f18725-1ea6-4fc5-95ec-a0f10efdc2f5) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/tiiuae/falcon-180B/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like tiiuae/falcon-180B is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-1.3B,EleutherAI/gpt-neo-1.3B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-6.7b,EleutherAI/pythia-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-7B,Qwen/Qwen1.5-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.4b,EleutherAI/pythia-1.4b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-13b,facebook/opt-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,8,8,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/8/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a31d7-4c3b16b32c4be21a5be19263;7008ee3a-4da9-4941-80e3-5b40f56e9f16) Repository Not Found for url: https://huggingface.co/8/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 8 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-72B,Qwen/Qwen-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 612, in resolve_trust_remote_code answer = input( EOFError: EOF 
when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 625, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Qwen/Qwen-72B contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Qwen/Qwen-72B. Please pass the argument `trust_remote_code=True` to allow custom code to be run. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,databricks/dbrx-base,databricks/dbrx-base,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, 
**kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cbe8d-0d6e286c52e5ddca7b59fada;cc47109d-8310-4a95-ad53-9a7758bcaa3c) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like databricks/dbrx-base is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,microsoft/rho-math-1b-v0.1,microsoft/rho-math-1b-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,openai-community/gpt2,openai-community/gpt2,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1224.593408,1005.060096,0.0,358.612992,318.913024,s,23,0.170830846786499,0.007427428121152131,0.00024413026802978468,0.007357215881347657,0.0075946943283081055,0.007852940797805786,0.008269951295852663,"[0.008380415916442872, 0.00787830400466919, 0.007307871818542481, 0.007413631916046143, 0.007416895866394043, 0.0072780799865722655, 0.007253759860992431, 0.007340799808502197, 0.00735097599029541, 0.0072897601127624514, 0.007357215881347657, 0.007351200103759766, 0.007452095985412598, 0.007375872135162354, 0.007336959838867187, 0.007404831886291504, 0.007378367900848388, 0.007350815773010254, 0.007241919994354248, 0.007624671936035156, 0.007474783897399902, 0.00736736011505127, 0.007204256057739258]",tokens/s,34466.843141968995,kWh,8.354114192041346e-08,4.5776604383460934e-08,1.7923360244695143e-07,3.0855134875082585e-07,tokens/kWh,829683620.0406166,MB,1224.593408,1005.060096,0.0,358.612992,328.804864,s,23,10.12951983642578,0.44041390593155577,0.009542742855595688,0.4373620910644531,0.4437781982421875,0.45109108581542967,0.474916849975586,"[0.4814091796875, 0.45189859008789063, 0.4373620910644531, 0.43599356079101564, 0.4377264404296875, 0.4396331787109375, 0.4402783508300781, 0.43823895263671875, 0.43630859375, 0.4375812683105469, 0.43703091430664065, 0.43696951293945313, 0.4364114990234375, 0.43596710205078126, 0.43623919677734374, 0.43678070068359376, 0.43731332397460937, 0.43723104858398437, 0.4438235473632812, 0.44186441040039065, 0.4435968017578125, 0.4382613220214844, 0.4316002502441406]",tokens/s,143.04725430216266,kWh,5.045933914928732e-06,2.7649476769741185e-06,8.713782771215293e-06,1.6524664363118138e-05,tokens/kWh,3812482.8810811713,,s,1448,10.284382205963144,0.007102473899145811,0.0009630348618537469,0.006931456089019775,0.00721827836036682,0.007574528217315674,0.014597724084854125,"[0.007964672088623047, 0.00796569585800171, 0.007897088050842285, 0.00785203218460083, 0.007930880069732665, 0.007737343788146973, 0.007622655868530274, 0.00760422420501709, 0.007505919933319092, 0.0074967041015625, 0.0075335679054260255, 0.007648255825042725, 0.007678976058959961, 0.007658495903015137, 0.007508992195129394, 0.007552000045776367, 0.007705599784851074, 0.00764415979385376, 0.007676928043365478, 0.007573503971099854, 0.007642111778259277, 0.007664639949798584, 0.00765337610244751, 0.0074629120826721195, 0.007479296207427978, 0.007540736198425293, 0.007504896163940429, 0.007546879768371582, 0.007592959880828858, 0.007724031925201416, 0.007574528217315674, 0.007631872177124023, 0.007477248191833496, 0.007482367992401123, 0.007755775928497314, 0.0074967041015625, 0.007511040210723877, 0.007269375801086426, 0.007320576190948487, 0.007468031883239746, 0.007742464065551758, 0.00800051212310791, 0.007601151943206787, 0.007601151943206787, 0.008147968292236327, 0.008087552070617676, 
0.007609344005584716, 0.007886847972869874, 0.007772160053253174, 0.007862271785736084, 0.0077844481468200685, 0.00765235185623169, 0.007622655868530274, 0.007872511863708496, 0.007607295989990235, 0.007804927825927735, 0.007388160228729248, 0.007648255825042725, 0.007409664154052734, 0.007129087924957276, 0.007120895862579346, 0.007165952205657959, 0.01587097644805908, 0.007638016223907471, 0.007574528217315674, 0.007745535850524903, 0.007613440036773681, 0.007448575973510742, 0.00725708818435669, 0.00749567985534668, 0.007291903972625732, 0.007451648235321045, 0.007557119846343994, 0.007046144008636474, 0.007103487968444824, 0.006964223861694336, 0.007108607769012451, 0.007080959796905518, 0.007032832145690918, 0.006998047828674316, 0.007235551834106445, 0.006897664070129395, 0.007023615837097168, 0.007216127872467041, 0.007019552230834961, 0.006999008178710938, 0.007031807899475098, 0.007042079925537109, 0.006928351879119873, 0.007155712127685547, 0.006977536201477051, 0.007123968124389648, 0.006990848064422607, 0.007591936111450195, 0.007071743965148926, 0.006994944095611572, 0.006910975933074951, 0.0069928960800170895, 0.007258111953735351, 0.006973440170288086, 0.007234591960906983, 0.0069836478233337405, 0.007014400005340577, 0.006899712085723877, 0.007013376235961914, 0.007256063938140869, 0.006994944095611572, 0.007252992153167725, 0.007035903930664063, 0.007158783912658692, 0.006909952163696289, 0.006960127830505371, 0.0070594558715820314, 0.007104512214660645, 0.006937600135803222, 0.0069632000923156735, 0.0069324798583984375, 0.007048192024230957, 0.00692633581161499, 0.00800153636932373, 0.007895040035247802, 0.007285759925842285, 0.007116799831390381, 0.00709119987487793, 0.007049215793609619, 0.01459609603881836, 0.00693555212020874, 0.006933504104614257, 0.006979584217071533, 0.006875135898590088, 0.00694271993637085, 0.006948863983154297, 0.006976511955261231, 0.006959104061126709, 0.0069283838272094726, 0.00693452787399292, 0.006937600135803222, 0.006903808116912841, 0.006919167995452881, 0.0069283838272094726, 0.006905856132507324, 0.007003136157989502, 0.007137279987335205, 0.006895616054534912, 0.006892543792724609, 0.0068986878395080565, 0.006917119979858399, 0.00692633581161499, 0.006939648151397705, 0.00692633581161499, 0.006933504104614257, 0.00690176010131836, 0.006895616054534912, 0.006918144226074219, 0.00692633581161499, 0.0068986878395080565, 0.006986752033233643, 0.006941696166992187, 0.00695091199874878, 0.0068853759765625, 0.0069324798583984375, 0.006906879901885986, 0.006952960014343262, 0.006914048194885254, 0.00693555212020874, 0.00689356803894043, 0.006964223861694336, 0.006908927917480469, 0.006937600135803222, 0.00693555212020874, 0.007006207942962647, 0.006982656002044678, 0.007010303974151612, 0.006971392154693603, 0.006910975933074951, 0.006913023948669434, 0.006918144226074219, 0.006919167995452881, 0.006913023948669434, 0.006947840213775635, 0.006929408073425293, 0.006910975933074951, 0.0069027838706970214, 0.006880256175994873, 0.006883327960968018, 0.0069283838272094726, 0.007147520065307617, 0.006989823818206787, 0.014668800354003907, 0.006913023948669434, 0.00693452787399292, 0.006882304191589355, 0.00694374418258667, 0.006876160144805908, 0.006906879901885986, 0.006904831886291504, 0.0068884482383728025, 0.006895616054534912, 0.006920191764831543, 0.006875135898590088, 0.006978559970855713, 0.006938623905181885, 0.006918144226074219, 0.007023615837097168, 0.006938623905181885, 0.006894591808319092, 0.006882304191589355, 0.006928415775299073, 
0.006888415813446045, 0.006894591808319092, 0.006958079814910889, 0.006945792198181152, 0.006919167995452881, 0.006920191764831543, 0.006819839954376221, 0.006814720153808594, 0.006752255916595459, 0.006714367866516113, 0.006791168212890625, 0.00672870397567749, 0.0067870721817016606, 0.006815743923187256, 0.006841343879699707, 0.006661119937896728, 0.00682700777053833, 0.0067348480224609375, 0.006766592025756836, 0.0067870721817016606, 0.006717440128326416, 0.006973440170288086, 0.007019519805908203, 0.006986752033233643, 0.007194623947143554, 0.006993919849395752, 0.0069632000923156735, 0.006990848064422607, 0.007023615837097168, 0.006989823818206787, 0.006961152076721191, 0.007007232189178467, 0.006951935768127441, 0.007015423774719238, 0.006973440170288086, 0.006993919849395752, 0.006976511955261231, 0.0072765440940856935, 0.006922239780426025, 0.007046144008636474, 0.006909952163696289, 0.007103487968444824, 0.006964223861694336, 0.014816287994384766, 0.0069242558479309085, 0.007008255958557129, 0.006899712085723877, 0.006957056045532227, 0.006952960014343262, 0.006903808116912841, 0.007002111911773682, 0.007018496036529541, 0.0069847040176391605, 0.007208960056304932, 0.006906879901885986, 0.007000127792358399, 0.006973375797271728, 0.006986783981323242, 0.007034848213195801, 0.006924287796020508, 0.006998015880584717, 0.006948863983154297, 0.00688640022277832, 0.00694374418258667, 0.006968319892883301, 0.006874112129211426, 0.006914048194885254, 0.00687718391418457, 0.006922239780426025, 0.00687820816040039, 0.0068986878395080565, 0.006887423992156983, 0.006941696166992187, 0.006927360057830811, 0.006922239780426025, 0.006933504104614257, 0.006924287796020508, 0.006929408073425293, 0.00690176010131836, 0.006960127830505371, 0.006919167995452881, 0.006922239780426025, 0.006909952163696289, 0.006914048194885254, 0.006906879901885986, 0.00694374418258667, 0.006897664070129395, 0.007015423774719238, 0.006918144226074219, 0.006923264026641846, 0.006927360057830811, 0.00693555212020874, 0.006977536201477051, 0.006917119979858399, 0.006937600135803222, 0.0069212160110473635, 0.00692633581161499, 0.006949888229370117, 0.006940671920776367, 0.006956031799316406, 0.006988800048828125, 0.0069283838272094726, 0.006905856132507324, 0.006929408073425293, 0.006905888080596924, 0.006922207832336426, 0.014533632278442383, 0.006973440170288086, 0.006919167995452881, 0.006920191764831543, 0.006881279945373535, 0.006919167995452881, 0.006936575889587402, 0.006895616054534912, 0.006916096210479736, 0.006924287796020508, 0.006922239780426025, 0.006907904148101806, 0.006941696166992187, 0.006944767951965332, 0.007005184173583984, 0.006919167995452881, 0.00692633581161499, 0.00687820816040039, 0.0069324798583984375, 0.006889472007751465, 0.006889472007751465, 0.007050240039825439, 0.00713318395614624, 0.007243775844573975, 0.007284736156463623, 0.0071905279159545895, 0.007258111953735351, 0.007041024208068848, 0.006989823818206787, 0.007035903930664063, 0.007030784130096435, 0.0069918718338012695, 0.006993919849395752, 0.006941696166992187, 0.0069253120422363285, 0.006946815967559815, 0.006966271877288818, 0.006931456089019775, 0.007050240039825439, 0.006999040126800537, 0.006977536201477051, 0.006973440170288086, 0.006953983783721924, 0.006923264026641846, 0.006951935768127441, 0.006907904148101806, 0.007014431953430176, 0.006917088031768799, 0.006968319892883301, 0.006927360057830811, 0.006930431842803955, 0.00694271993637085, 0.006913023948669434, 0.007008255958557129, 0.006936575889587402, 
0.00692633581161499, 0.006958079814910889, 0.007015423774719238, 0.006990848064422607, 0.006922239780426025, 0.00692633581161499, 0.00693452787399292, 0.007012351989746094, 0.014345215797424317, 0.00676966381072998, 0.006661119937896728, 0.0067717118263244626, 0.006789120197296142, 0.006773759841918945, 0.00672051191329956, 0.006830080032348633, 0.006714367866516113, 0.006793216228485107, 0.006797311782836914, 0.00676358413696289, 0.0067491202354431155, 0.006756351947784424, 0.006776832103729248, 0.006715392112731934, 0.006752255916595459, 0.007037951946258545, 0.0069959678649902345, 0.006970367908477783, 0.00693555212020874, 0.006985727787017822, 0.0073062400817871095, 0.006953983783721924, 0.00690176010131836, 0.007269375801086426, 0.006917119979858399, 0.006968319892883301, 0.007024640083312988, 0.007193600177764893, 0.007230463981628418, 0.006991936206817627, 0.007226304054260254, 0.006900735855102539, 0.007223296165466309, 0.0069970240592956544, 0.006896607875823975, 0.007322624206542969, 0.007062528133392334, 0.006974463939666748, 0.00688640022277832, 0.007178239822387696, 0.0069621758460998535, 0.007023615837097168, 0.0069212160110473635, 0.007299071788787842, 0.0071833600997924804, 0.007261184215545655, 0.007209983825683594, 0.007198719978332519, 0.007049215793609619, 0.0069959678649902345, 0.007003136157989502, 0.007105535984039306, 0.007074816226959229, 0.006998015880584717, 0.0069918718338012695, 0.00690176010131836, 0.006965248107910156, 0.007762944221496582, 0.006952960014343262, 0.006976511955261231, 0.0069202561378479, 0.01471072006225586, 0.0072724480628967286, 0.006916096210479736, 0.006939648151397705, 0.006899712085723877, 0.007030784130096435, 0.006989823818206787, 0.006977536201477051, 0.007060480117797851, 0.0072120318412780765, 0.006953983783721924, 0.006909952163696289, 0.006884352207183838, 0.006938623905181885, 0.006926400184631348, 0.00695084810256958, 0.006960127830505371, 0.006939648151397705, 0.0068853759765625, 0.006904831886291504, 0.006896639823913574, 0.006917119979858399, 0.006908959865570068, 0.006914015769958496, 0.006940671920776367, 0.006941696166992187, 0.006894591808319092, 0.006892543792724609, 0.007236608028411865, 0.007029759883880615, 0.006912000179290771, 0.006895679950714112, 0.006928319931030273, 0.006906879901885986, 0.006958079814910889, 0.006897664070129395, 0.006916096210479736, 0.006875167846679688, 0.006910943984985351, 0.00690176010131836, 0.00689356803894043, 0.0069027838706970214, 0.007012351989746094, 0.006970367908477783, 0.006946879863739014, 0.006910912036895752, 0.006938623905181885, 0.006899712085723877, 0.0069028158187866215, 0.006964191913604736, 0.006917119979858399, 0.006930431842803955, 0.006939648151397705, 0.0070594558715820314, 0.006999040126800537, 0.006949888229370117, 0.00695091199874878, 0.007007232189178467, 0.006937600135803222, 0.006905856132507324, 0.006920191764831543, 0.006880256175994873, 0.00692633581161499, 0.014599167823791503, 0.0069253120422363285, 0.006918144226074219, 0.006918144226074219, 0.006866943836212158, 0.006870016098022461, 0.00694374418258667, 0.0069816322326660156, 0.006914048194885254, 0.006931456089019775, 0.006946815967559815, 0.006905856132507324, 0.006940671920776367, 0.006916096210479736, 0.007143424034118652, 0.006927360057830811, 0.006937600135803222, 0.006957056045532227, 0.006896639823913574, 0.006907904148101806, 0.006887423992156983, 0.006913023948669434, 0.006900735855102539, 0.006895616054534912, 0.006890495777130127, 0.0068915200233459475, 0.006884352207183838, 
0.0069324798583984375, 0.006916096210479736, 0.006897664070129395, 0.006910975933074951, 0.006903808116912841, 0.00693452787399292, 0.007013376235961914, 0.00692633581161499, 0.006889472007751465, 0.006912000179290771, 0.0068986878395080565, 0.006977536201477051, 0.006904895782470703, 0.006909887790679931, 0.006899712085723877, 0.006912000179290771, 0.006887423992156983, 0.006910975933074951, 0.006879231929779053, 0.006884352207183838, 0.0068915200233459475, 0.006903808116912841, 0.006946815967559815, 0.006875135898590088, 0.006924287796020508, 0.006918144226074219, 0.006905856132507324, 0.006973440170288086, 0.006930431842803955, 0.006915071964263916, 0.00691206407546997, 0.007011263847351074, 0.0069212160110473635, 0.006913023948669434, 0.006920191764831543, 0.006917119979858399, 0.014561280250549317, 0.006904831886291504, 0.0069283838272094726, 0.006930431842803955, 0.006904831886291504, 0.006907904148101806, 0.006908927917480469, 0.006931456089019775, 0.0069283838272094726, 0.006903808116912841, 0.006924287796020508, 0.006895616054534912, 0.006883327960968018, 0.007070720195770264, 0.006931456089019775, 0.006934559822082519, 0.006951903820037842, 0.007005184173583984, 0.0070225920677185055, 0.006919167995452881, 0.006883327960968018, 0.006973440170288086, 0.006927360057830811, 0.006957056045532227, 0.006895616054534912, 0.006903808116912841, 0.007022624015808106, 0.007080927848815918, 0.006919167995452881, 0.006929408073425293, 0.006917119979858399, 0.006916096210479736, 0.006899712085723877, 0.006899712085723877, 0.006895616054534912, 0.006917119979858399, 0.006946815967559815, 0.006915071964263916, 0.006927360057830811, 0.006884352207183838, 0.006908927917480469, 0.00687820816040039, 0.006930431842803955, 0.006919167995452881, 0.006918144226074219, 0.007233535766601563, 0.007015423774719238, 0.006924287796020508, 0.006957056045532227, 0.0069253120422363285, 0.006894591808319092, 0.006938623905181885, 0.006927360057830811, 0.006912000179290771, 0.006912000179290771, 0.006947840213775635, 0.007037951946258545, 0.006979584217071533, 0.006904831886291504, 0.006974463939666748, 0.006968319892883301, 0.006972415924072266, 0.006949888229370117, 0.014623744010925293, 0.006917119979858399, 0.006960127830505371, 0.006922239780426025, 0.0069283838272094726, 0.00698367977142334, 0.0068986878395080565, 0.006946815967559815, 0.006912000179290771, 0.0069027838706970214, 0.00688640022277832, 0.0069324798583984375, 0.006915071964263916, 0.00689356803894043, 0.006929408073425293, 0.006899712085723877, 0.006931456089019775, 0.006884352207183838, 0.006897664070129395, 0.006930431842803955, 0.006949888229370117, 0.006898752212524414, 0.006896575927734375, 0.006960127830505371, 0.006882304191589355, 0.006930431842803955, 0.006897664070129395, 0.006989823818206787, 0.006908927917480469, 0.006970367908477783, 0.007172095775604248, 0.007028736114501953, 0.006956031799316406, 0.00694374418258667, 0.006889472007751465, 0.0068915200233459475, 0.006998015880584717, 0.006939648151397705, 0.0069283838272094726, 0.006924352169036865, 0.006933440208435058, 0.0069253120422363285, 0.006907904148101806, 0.007010303974151612, 0.006948863983154297, 0.006965248107910156, 0.0069324798583984375, 0.006885407924652099, 0.006927328109741211, 0.006943808078765869, 0.006981567859649658, 0.006958079814910889, 0.006923295974731445, 0.006906847953796386, 0.00693452787399292, 0.0068986878395080565, 0.006922239780426025, 0.006908927917480469, 0.006931456089019775, 0.006900735855102539, 0.006931456089019775, 0.006889472007751465, 
0.006872096061706543, 0.01465238380432129, 0.006907904148101806, 0.006927360057830811, 0.006946815967559815, 0.006900735855102539, 0.006948863983154297, 0.006927360057830811, 0.006892543792724609, 0.006960127830505371, 0.006889503955841064, 0.00689353609085083, 0.0069621758460998535, 0.006900735855102539, 0.006875135898590088, 0.006910975933074951, 0.006908927917480469, 0.006912000179290771, 0.006855679988861084, 0.006907904148101806, 0.007006207942962647, 0.006874112129211426, 0.006908927917480469, 0.006940671920776367, 0.006881279945373535, 0.00693452787399292, 0.0068986878395080565, 0.006946815967559815, 0.006880256175994873, 0.00688640022277832, 0.006918144226074219, 0.0069283838272094726, 0.0068915200233459475, 0.00698367977142334, 0.0069283838272094726, 0.006909952163696289, 0.007021567821502686, 0.006881279945373535, 0.0070522880554199216, 0.0069324798583984375, 0.00693555212020874, 0.006896639823913574, 0.006949920177459717, 0.006969312191009522, 0.006952960014343262, 0.006909952163696289, 0.006976511955261231, 0.0068853759765625, 0.006871039867401123, 0.006940671920776367, 0.006964223861694336, 0.006941696166992187, 0.006859776020050049, 0.006858751773834228, 0.006897664070129395, 0.00692633581161499, 0.006903808116912841, 0.006887423992156983, 0.006958079814910889, 0.006865920066833496, 0.006944767951965332, 0.007127039909362793, 0.00709119987487793, 0.007139328002929687, 0.014806015968322754, 0.006964223861694336, 0.00694271993637085, 0.006897664070129395, 0.00694271993637085, 0.006931456089019775, 0.0069110398292541505, 0.006891456127166748, 0.006899712085723877, 0.006905856132507324, 0.0069027838706970214, 0.006922239780426025, 0.006948863983154297, 0.00690176010131836, 0.006965248107910156, 0.00694271993637085, 0.006922304153442383, 0.006911935806274414, 0.006879231929779053, 0.007000063896179199, 0.007001088142395019, 0.006974463939666748, 0.006907904148101806, 0.006933504104614257, 0.00693452787399292, 0.006912000179290771, 0.006887423992156983, 0.00692633581161499, 0.006903808116912841, 0.006927360057830811, 0.006933504104614257, 0.006930431842803955, 0.006933504104614257, 0.0069253120422363285, 0.006906879901885986, 0.00693452787399292, 0.007009280204772949, 0.0068915200233459475, 0.006912000179290771, 0.006920191764831543, 0.006969344139099121, 0.006940671920776367, 0.006894591808319092, 0.006872064113616944, 0.006859776020050049, 0.0069253120422363285, 0.006975488185882568, 0.007011328220367432, 0.006889472007751465, 0.006876160144805908, 0.00688643217086792, 0.006930399894714356, 0.006944767951965332, 0.006916096210479736, 0.006895616054534912, 0.006900735855102539, 0.006912000179290771, 0.0068986878395080565, 0.006919167995452881, 0.00687718391418457, 0.006874112129211426, 0.0068915200233459475, 0.006866943836212158, 0.014652416229248047, 0.0069212160110473635, 0.006944767951965332, 0.00687718391418457, 0.0069642882347106935, 0.006937535762786865, 0.006914048194885254, 0.0069212160110473635, 0.006927360057830811, 0.0069253120422363285, 0.006930431842803955, 0.006933504104614257, 0.006915103912353515, 0.0069283838272094726, 0.006927328109741211, 0.006971392154693603, 0.006904831886291504, 0.006937600135803222, 0.006914048194885254, 0.006896639823913574, 0.00689356803894043, 0.006919167995452881, 0.006933504104614257, 0.006907904148101806, 0.0068689918518066405, 0.006903808116912841, 0.007013376235961914, 0.006936575889587402, 0.006941696166992187, 0.006883359909057617, 0.006904799938201905, 0.00687718391418457, 0.00688643217086792, 0.006876128196716309, 
0.006919167995452881, 0.006908927917480469, 0.0068925762176513675, 0.0069201598167419436, 0.006894591808319092, 0.0068689918518066405, 0.006906879901885986, 0.006959104061126709, 0.006904831886291504, 0.006931488037109375, 0.0068873920440673825, 0.006906879901885986, 0.006896671772003174, 0.006886367797851563, 0.006938623905181885, 0.00687820816040039, 0.00687718391418457, 0.006907904148101806, 0.006998015880584717, 0.0069027838706970214, 0.006923264026641846, 0.006890495777130127, 0.0068986878395080565, 0.006960127830505371, 0.0069027838706970214, 0.0069212160110473635, 0.0068884482383728025, 0.006894591808319092, 0.006938623905181885, 0.014593024253845215, 0.006933504104614257, 0.006898719787597657, 0.0069242558479309085, 0.006897664070129395, 0.006896639823913574, 0.0069253120422363285, 0.006961152076721191, 0.006929408073425293, 0.006920191764831543, 0.006914048194885254, 0.0069283838272094726, 0.0068915200233459475, 0.006884352207183838, 0.006931456089019775, 0.006946815967559815, 0.0069027838706970214, 0.006856704235076904, 0.0069253120422363285, 0.0069212160110473635, 0.006968319892883301, 0.006905856132507324, 0.006894591808319092, 0.006916096210479736, 0.006867968082427979, 0.0069324798583984375, 0.006917119979858399, 0.006860799789428711, 0.007000063896179199, 0.007016448020935059, 0.007054336071014404, 0.006895679950714112, 0.006895552158355713, 0.006907904148101806, 0.0069283838272094726, 0.006920191764831543, 0.006859776020050049, 0.007019519805908203, 0.006895616054534912, 0.006855679988861084, 0.006979584217071533, 0.006900735855102539, 0.006946815967559815, 0.006892543792724609, 0.006904831886291504, 0.006873087882995605, 0.006938623905181885, 0.006903808116912841, 0.006917119979858399, 0.006920191764831543, 0.006860799789428711, 0.006915071964263916, 0.006945792198181152, 0.006906879901885986, 0.006913023948669434, 0.0068884482383728025, 0.006949888229370117, 0.006906879901885986, 0.007002111911773682, 0.006951935768127441, 0.006909952163696289, 0.0069027838706970214, 0.006880256175994873, 0.014742527961730957, 0.006971392154693603, 0.006909952163696289, 0.006938623905181885, 0.0068884482383728025, 0.006948863983154297, 0.006922239780426025, 0.006908927917480469, 0.0069283838272094726, 0.006914048194885254, 0.00687820816040039, 0.006920191764831543, 0.0068884482383728025, 0.006875135898590088, 0.006957056045532227, 0.0069212160110473635, 0.00693452787399292, 0.006905856132507324, 0.006978559970855713, 0.006904831886291504, 0.0069253120422363285, 0.006929408073425293, 0.006948863983154297, 0.006897664070129395, 0.006883327960968018, 0.00692633581161499, 0.006916096210479736, 0.006927360057830811, 0.007006207942962647, 0.006929408073425293, 0.0069212160110473635, 0.006900735855102539, 0.0069253120422363285, 0.006931456089019775, 0.006907904148101806, 0.00709939193725586, 0.006915071964263916, 0.00694374418258667, 0.006969344139099121, 0.006910975933074951, 0.006994944095611572, 0.006936575889587402, 0.0069069118499755855, 0.006930399894714356, 0.006910975933074951, 0.0069253120422363285, 0.00690176010131836, 0.00689356803894043, 0.006899712085723877, 0.0068853759765625, 0.006951935768127441, 0.006900735855102539, 0.006927360057830811, 0.006890495777130127, 0.0068915200233459475, 0.0069027838706970214, 0.006995999813079834, 0.006994912147521973, 0.006920191764831543, 0.0069324798583984375, 0.006895616054534912, 0.0069550080299377445, 0.006908927917480469, 0.014665727615356445, 0.006914048194885254, 0.006918144226074219, 0.006918144226074219, 0.006912000179290771, 
0.00694374418258667, 0.00690176010131836, 0.006952960014343262, 0.006883327960968018, 0.006876160144805908, 0.006899712085723877, 0.006956031799316406, 0.006949888229370117, 0.006945792198181152, 0.0069253120422363285, 0.006944767951965332, 0.006945792198181152, 0.006855679988861084, 0.006905856132507324, 0.006890495777130127, 0.006905856132507324, 0.0069027838706970214, 0.006953983783721924, 0.006895616054534912, 0.007087103843688965, 0.007011328220367432, 0.006944767951965332, 0.0069253120422363285, 0.007228415966033935, 0.006970367908477783, 0.00694271993637085, 0.006924287796020508, 0.0069959678649902345, 0.007029759883880615, 0.006946815967559815, 0.006916096210479736, 0.0068915200233459475, 0.006931456089019775, 0.0068915200233459475, 0.006907904148101806, 0.006862847805023193, 0.006905856132507324, 0.006903808116912841, 0.006897664070129395, 0.006985727787017822, 0.0068986878395080565, 0.006974463939666748, 0.007048192024230957, 0.006961152076721191, 0.00692633581161499, 0.006879231929779053, 0.0068853759765625, 0.006927360057830811, 0.0069283838272094726, 0.006991936206817627, 0.006887360095977783, 0.006966271877288818, 0.006919167995452881, 0.006979584217071533, 0.006908927917480469, 0.006952960014343262, 0.006882304191589355, 0.006916096210479736, 0.014645248413085938, 0.006903808116912841, 0.0069550080299377445, 0.006904831886291504, 0.006919167995452881, 0.006909952163696289, 0.00693452787399292, 0.006914048194885254, 0.006996992111206054, 0.006957056045532227, 0.006909952163696289, 0.006961152076721191, 0.006930431842803955, 0.00694374418258667, 0.006899712085723877, 0.007104512214660645, 0.006967296123504638, 0.0068915200233459475, 0.006910975933074951, 0.006908927917480469, 0.007009280204772949, 0.006941696166992187, 0.006906879901885986, 0.006910975933074951, 0.006908927917480469, 0.006881279945373535, 0.006944767951965332, 0.0070225920677185055, 0.007060480117797851, 0.006876160144805908, 0.006990848064422607, 0.006943808078765869, 0.006904767990112305, 0.006899712085723877, 0.006931456089019775, 0.006919167995452881, 0.006939648151397705, 0.006909952163696289, 0.0069325118064880375, 0.006895584106445312, 0.006896639823913574, 0.006879231929779053, 0.00690176010131836, 0.006894591808319092, 0.0068884482383728025, 0.006903808116912841, 0.006913087844848633, 0.006928319931030273, 0.006899712085723877, 0.006903840065002441, 0.0069242558479309085, 0.0068915200233459475, 0.006914048194885254, 0.006912000179290771, 0.006933504104614257, 0.006919167995452881, 0.006971392154693603, 0.007261184215545655, 0.006969344139099121, 0.006930431842803955, 0.006971392154693603, 0.006887423992156983, 0.006945792198181152, 0.014636063575744629, 0.007252960205078125, 0.007076863765716553, 0.007001088142395019, 0.006970367908477783, 0.006974463939666748, 0.006895616054534912, 0.0069847040176391605, 0.006874112129211426, 0.0070522880554199216, 0.0069263682365417484, 0.007037919998168945, 0.006966271877288818, 0.007018496036529541, 0.007205887794494629, 0.006953983783721924, 0.0069959678649902345, 0.00704307222366333, 0.00689356803894043, 0.006999040126800537, 0.006931456089019775, 0.0070225920677185055, 0.006976511955261231, 0.00693452787399292, 0.007004159927368164, 0.007181312084197998, 0.006958079814910889, 0.006990848064422607, 0.006975488185882568, 0.006904831886291504, 0.007112703800201416, 0.007269375801086426, 0.0069632000923156735, 0.006927360057830811, 0.00698473596572876, 0.006968287944793701, 0.006958079814910889, 0.007260159969329834, 0.007002111911773682, 0.006919167995452881, 
0.006956031799316406, 0.007010303974151612, 0.006985727787017822, 0.007192575931549072, 0.006986752033233643, 0.007312384128570557, 0.007241727828979493, 0.007263232231140137, 0.007411712169647216, 0.007120895862579346, 0.007129087924957276, 0.007027711868286133, 0.007016448020935059, 0.00725708818435669, 0.006986752033233643, 0.007013376235961914, 0.006919167995452881, 0.007062528133392334, 0.007166975975036621, 0.007005184173583984, 0.007010303974151612, 0.007048192024230957, 0.00693452787399292, 0.015050751686096191, 0.006931456089019775, 0.007106560230255127, 0.007230463981628418, 0.007049215793609619, 0.006980607986450196, 0.006979584217071533, 0.0069621758460998535, 0.006921279907226563, 0.007070655822753906, 0.007021567821502686, 0.006990880012512207, 0.0069836478233337405, 0.007192575931549072, 0.007269375801086426, 0.006907904148101806, 0.007000063896179199, 0.006956031799316406, 0.0068915200233459475, 0.006993919849395752, 0.006939648151397705, 0.0068915200233459475, 0.006939648151397705, 0.006964223861694336, 0.006922239780426025, 0.007088128089904785, 0.006985727787017822, 0.00687820816040039, 0.006965248107910156, 0.006884352207183838, 0.007039999961853028, 0.00694271993637085, 0.007050240039825439, 0.006924287796020508, 0.006986752033233643, 0.007234560012817383, 0.007004159927368164, 0.007000063896179199, 0.006998015880584717, 0.0069550080299377445, 0.006915071964263916, 0.007016448020935059, 0.006907904148101806, 0.006964223861694336, 0.0069918718338012695, 0.006897664070129395, 0.00707583999633789, 0.006890495777130127, 0.007128064155578613, 0.007211040019989014, 0.006999008178710938, 0.006974463939666748, 0.006976511955261231, 0.006882304191589355, 0.006985727787017822, 0.00693555212020874, 0.00790015983581543, 0.007143424034118652, 0.006940671920776367, 0.0069632000923156735, 0.006894591808319092, 0.0069816322326660156, 0.0068915200233459475, 0.014715904235839844, 0.00690176010131836, 0.006988800048828125, 0.006951935768127441, 0.006908927917480469, 0.006919167995452881, 0.00688640022277832, 0.0069212160110473635, 0.0068884482383728025, 0.006951935768127441, 0.006890495777130127, 0.0068689918518066405, 0.006906879901885986, 0.007001120090484619, 0.006945759773254395, 0.006952960014343262, 0.0069918718338012695, 0.006864895820617676, 0.006909952163696289, 0.006866943836212158, 0.006892543792724609, 0.006919167995452881, 0.0070348801612854, 0.0070563840866088865, 0.0070052480697631835, 0.006940608024597168, 0.006910975933074951, 0.006906879901885986, 0.007321599960327148, 0.007094272136688232, 0.007371776103973389, 0.007673855781555176, 0.007137279987335205, 0.006964223861694336, 0.006982656002044678, 0.007284736156463623, 0.006924287796020508, 0.007064576148986816, 0.007103487968444824, 0.006976511955261231, 0.006929408073425293, 0.006939648151397705, 0.0069816322326660156, 0.006917119979858399, 0.006888480186462403, 0.007059423923492431, 0.006910975933074951, 0.006922239780426025, 0.006870016098022461, 0.006909952163696289, 0.006894591808319092, 0.007114751815795899, 0.007982079982757568, 0.007064576148986816, 0.007649280071258545, 0.0077281279563903805, 0.00708403205871582, 0.0070225920677185055, 0.007300096035003662, 0.007173120021820068, 0.007039999961853028, 0.006969344139099121, 0.006939648151397705, 0.014486528396606446, 0.006723584175109864, 0.006748159885406494, 0.006717440128326416, 0.0067758078575134275, 0.0067573761940002445, 0.006811647891998291, 0.0067983360290527345, 0.006649856090545654, 0.006770688056945801, 0.006931456089019775, 0.0067983360290527345, 
0.0067348480224609375, 0.006781951904296875, 0.006737919807434082, 0.0067573761940002445, 0.006744063854217529, 0.006760447978973389, 0.006822912216186523, 0.006897664070129395, 0.007024640083312988, 0.006904831886291504, 0.007005184173583984, 0.00687820816040039, 0.0070266880989074704, 0.007021567821502686, 0.006913023948669434, 0.006964223861694336, 0.007012351989746094, 0.006915071964263916, 0.007185408115386963, 0.007006207942962647, 0.006979584217071533, 0.00723967981338501, 0.007209983825683594, 0.006987775802612305, 0.0069212160110473635, 0.0069621758460998535, 0.006931456089019775, 0.007063551902770996, 0.0069847040176391605, 0.00698367977142334, 0.007051263809204102, 0.007016448020935059, 0.007003136157989502, 0.007104512214660645, 0.007120895862579346, 0.007079936027526855, 0.007358463764190673, 0.00728166389465332, 0.006987775802612305, 0.0068915200233459475, 0.006978559970855713, 0.007234560012817383, 0.007006207942962647, 0.0069283838272094726, 0.007095295906066895, 0.007061503887176514, 0.007015423774719238, 0.007035903930664063, 0.007011328220367432, 0.007107615947723389, 0.006958047866821289, 0.014362624168395996, 0.006820864200592041, 0.006730751991271973, 0.00682700777053833, 0.00679423999786377, 0.0068280320167541505, 0.006766592025756836, 0.0067276802062988285, 0.006837247848510742, 0.006752255916595459, 0.006815807819366455, 0.006806464195251465, 0.006740992069244385, 0.006767615795135498, 0.006711296081542969, 0.006871039867401123, 0.006770688056945801, 0.006800384044647217, 0.0068393278121948245, 0.00679318380355835, 0.006830080032348633, 0.006724607944488525, 0.00673689603805542, 0.006818816184997558, 0.00675328016281128, 0.006808576107025147, 0.006770688056945801, 0.006814720153808594, 0.006792191982269287, 0.006724607944488525, 0.00677785587310791, 0.006762495994567871, 0.0067041277885437015, 0.006730751991271973, 0.006803455829620361, 0.00672160005569458, 0.006800320148468018, 0.006797376155853271, 0.006750144004821777, 0.006681600093841553, 0.0067348480224609375, 0.0067358717918396, 0.006731776237487793, 0.006802432060241699, 0.006825984001159668, 0.006759424209594727, 0.006781951904296875, 0.007107583999633789, 0.007262207984924316, 0.006866943836212158, 0.006956031799316406, 0.00722431993484497, 0.006994944095611572, 0.007202847957611084, 0.007022560119628906, 0.006889472007751465, 0.007014400005340577, 0.006987775802612305, 0.007175168037414551, 0.0074291200637817386, 0.0069550080299377445, 0.006973440170288086, 0.0067645440101623535]",tokens/s,140.7960119529994,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,togethercomputer/RedPajama-INCITE-Base-7B-v0.1,togethercomputer/RedPajama-INCITE-Base-7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in 
launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-30b,huggyllama/llama-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = 
worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,togethercomputer/RedPajama-INCITE-Base-3B-v1,togethercomputer/RedPajama-INCITE-Base-3B-v1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File 
""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,mistralai/Mixtral-8x7B-v0.1,mistralai/Mixtral-8x7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,r,r,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/r/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return 
_hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a306f-216d4bb160d7946b28d9a9f4;bd7eb209-3f71-4879-aa74-e3b9bff6f748) Repository Not Found for url: https://huggingface.co/r/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: r is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-12b,EleutherAI/pythia-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-30b,facebook/opt-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-564M,facebook/xglm-564M,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: XGLMForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpa7iq6go7/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,TencentARC/Mistral_Pro_8B_v0.1,TencentARC/Mistral_Pro_8B_v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-base-alpha-7b,stabilityai/stablelm-base-alpha-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-6B,01-ai/Yi-6B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 564, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3907, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 466, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-14B,Qwen/Qwen1.5-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm2-20b,internlm/internlm2-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 612, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 625, in resolve_trust_remote_code raise ValueError( ValueError: The repository for internlm/internlm2-20b contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/internlm/internlm2-20b. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,.,.,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 370, in cached_file raise EnvironmentError( OSError: . does not appear to have a file named config.json. Checkout 'https://huggingface.co/./tree/None' for available files. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-j-6b,EleutherAI/gpt-j-6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-7.5B,facebook/xglm-7.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-7b,tiiuae/falcon-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 612, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 625, in resolve_trust_remote_code raise ValueError( ValueError: The repository for tiiuae/falcon-7b contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/tiiuae/falcon-7b. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/polyglot-ko-12.8b,EleutherAI/polyglot-ko-12.8b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciCoder-1b,Deci/DeciCoder-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 613, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 419, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 626, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Deci/DeciCoder-1b contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Deci/DeciCoder-1b. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,m,m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/m/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a2b81-07a6d5ce14a8cd862322a9df;e006f97c-5a6b-42cc-9f32-e336933f23ed) Repository Not Found for url: https://huggingface.co/m/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: m is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-1.8B,Qwen/Qwen1.5-1.8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-MoE-A2.7B,Qwen/Qwen1.5-MoE-A2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights 
self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,v,v,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/v/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in 
hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a33a2-175478ba6b90285b4ce3cd0a;b0291919-c384-481b-ac0c-3a459a85cf18) Repository Not Found for url: https://huggingface.co/v/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: v is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-110B,Qwen/Qwen1.5-110B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-2-12b,stabilityai/stablelm-2-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.3b,EleutherAI/pythia-1.3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-7b-hf,meta-llama/Llama-2-7b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciLM-7B,Deci/DeciLM-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 613, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 419, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 626, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Deci/DeciLM-7B contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Deci/DeciLM-7B. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-0.5B,Qwen/Qwen1.5-0.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,0,0,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/0/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a33fa-610798ec3c9d53c70bb29833;1b7ed9bc-0b4a-46e4-8b3b-9e97d98016ab) Repository Not Found for url: https://huggingface.co/0/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 0 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-70b-hf,meta-llama/Llama-2-70b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 112.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,2,2,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/2/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a32a0-6b3e7e301422f37264407cf1;d04f2554-625f-4fea-b2cc-3755d10e138d) Repository Not Found for url: https://huggingface.co/2/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 2 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-2.7b,facebook/opt-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,l,l,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/l/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File 
""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a30ef-52bc31bb60881564484b48e8;f8f68ea8-2783-4ca3-a238-86f25a324b1e) Repository Not Found for url: https://huggingface.co/l/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: l is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Meta-Llama-3-70B,meta-llama/Meta-Llama-3-70B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights 
self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-32B,Qwen/Qwen1.5-32B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-72B,Qwen/Qwen2-beta-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", 
line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,s,s,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/s/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2c27-7e58fa523d700a3225be242f;5df3a9db-70a5-4cba-b48d-76582808b383) Repository Not Found for url: https://huggingface.co/s/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: s is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,B,B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/B/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a32f6-3fc6ced3288a0e593cc08c7c;fcaaee8d-d2f5-4f75-bc2a-77e51eb8b072) Repository Not Found for url: https://huggingface.co/B/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: B is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-14B,Qwen/Qwen2-beta-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,mistralai/Mistral-7B-v0.1,mistralai/Mistral-7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights 
self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-16B-nl,Salesforce/codegen-16B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File 
""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-2.7b,EleutherAI/pythia-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,a,a,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/a/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a3092-0b9e8a735b34d5f7799a369f;ea3c4351-8f77-433a-8434-4bada68d5082) Repository Not Found for url: https://huggingface.co/a/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: a is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-2.7B,EleutherAI/gpt-neo-2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 
7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-40b,tiiuae/falcon-40b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 612, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 625, in resolve_trust_remote_code raise ValueError( ValueError: The repository for tiiuae/falcon-40b contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/tiiuae/falcon-40b. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,1,1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/1/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a34a7-27abac8017ac395a4981b5a6;4fd2c00b-0c70-4e23-b1e0-f6f729e0be23) Repository Not Found for url: https://huggingface.co/1/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 1 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-base-alpha-3b,stabilityai/stablelm-base-alpha-3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,t,t,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/t/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File 
""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2fd3-2b96c2b063f7df753db5e95b;33cd2b34-e9e7-4add-aa8b-98dcb35406a2) Repository Not Found for url: https://huggingface.co/t/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: t is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neox-20b,EleutherAI/gpt-neox-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights 
self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,/,/,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = 
PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 370, in cached_file raise EnvironmentError( OSError: / does not appear to have a file named config.json. Checkout 'https://huggingface.co///tree/None' for available files. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-2b,google/recurrentgemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: 
Root=1-667cc398-5fc105e351796cf60aec854a;92649cc5-631a-41a8-b6de-9518e77df712) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/recurrentgemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-72B,Qwen/Qwen1.5-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,-,-,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 106, in _inner_fn validate_repo_id(arg_value) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 160, in validate_repo_id raise HFValidationError( huggingface_hub.errors.HFValidationError: Repo id must use alphanumeric chars or '-', '_', '.', '--' and '..' are forbidden, '-' and '.' cannot start or end the name, max length is 96: '-'. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 463, in cached_file raise EnvironmentError( OSError: Incorrect path_or_model_id: '-'. Please provide either the path to a local folder or the repo_id of a model on the Hub. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-6B-nl,Salesforce/codegen-6B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-7b,google/recurrentgemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667cc3ee-702fb9245e83820a02751d55;3b312123-57f6-4857-bf03-9c3f35948031) Repository Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: google/recurrentgemma-7b is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-13b,huggyllama/llama-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) 
File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-2b,google/gemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most 
recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc2f0-55d24be5697d0bf31f0e58db;65db1bec-08b9-4fa5-bd80-d4e8af56db77) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/gemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-7B,Qwen/Qwen-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 612, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 625, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Qwen/Qwen-7B contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Qwen/Qwen-7B. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Llama-2-13b-hf,meta-llama/Llama-2-13b-hf,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-4B,Qwen/Qwen1.5-4B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-410m,EleutherAI/pythia-410m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-7b,huggyllama/llama-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-125m,EleutherAI/gpt-neo-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,microsoft/phi-1_5,microsoft/phi-1_5,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
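The NameError: name 'exlv2_ext' is not defined rows (Llama-2-13b, Qwen1.5-4B, pythia-410m, llama-7b, gpt-neo-125m, phi-1_5 above, and opt-6.7b, Yi-34B, pythia-160m, pythia-70m further below) all fail at the same point: AutoAWQ guards its ExLlamaV2 kernel import, so a missing or broken kernel build only surfaces later, when post_init() touches the undefined module. A hedged pre-flight check, assuming the compiled kernels ship in the separate autoawq-kernels package:

# Probe the extension directly instead of waiting for the NameError at
# quantizer post-init time.
try:
    import exlv2_ext  # compiled ExLlamaV2 kernels used by awq.modules.linear.exllamav2
except ImportError as err:
    raise RuntimeError(
        "ExLlamaV2 AWQ kernels unavailable; installing autoawq-kernels "
        "(matching the local torch/CUDA build) may be required before "
        "benchmarking with quantization_config.version='exllama'"
    ) from err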
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-14B,Qwen/Qwen-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 612, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 625, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Qwen/Qwen-14B contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Qwen/Qwen-14B. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-65b,huggyllama/llama-65b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 86.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-7b,google/gemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cc342-6b1c34c516e926da2667a89d;c1114053-97e2-4de1-bd01-e13661f37dd3) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/google/gemma-7b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-7b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,i,i,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/i/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a2f26-600c984b5e3d157e405b859f;4907457b-d2ac-4db1-9715-806c1f0f6578) Repository Not Found for url: https://huggingface.co/i/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: i is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-6.7b,facebook/opt-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-34B,01-ai/Yi-34B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File 
""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 564, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3907, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 466, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-160m,EleutherAI/pythia-160m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File 
""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,M,M,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/M/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in 
_raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a2ed0-73c17fae688b18f07b4a4f5f;c2e363ee-3f75-4950-863c-42bc186511eb) Repository Not Found for url: https://huggingface.co/M/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: M is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
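The google/gemma-7b row (403, gated repository) and the malformed model ids 'i' and 'M' (404, repository not found) fail before any benchmarking starts, while resolving config.json on the Hub. A small pre-flight sketch that distinguishes the two cases up front, assuming huggingface_hub's public metadata API; the helper name check_repo is illustrative:

from huggingface_hub import model_info
from huggingface_hub.utils import (
    GatedRepoError,
    HfHubHTTPError,
    RepositoryNotFoundError,
)

def check_repo(repo_id, token=None):
    # Resolve repo metadata once; far cheaper than a full benchmark launch.
    # GatedRepoError subclasses RepositoryNotFoundError, so catch it first.
    try:
        model_info(repo_id, token=token)
    except GatedRepoError:
        print(f"{repo_id}: gated; accept the license or use a token with access")
    except RepositoryNotFoundError:
        print(f"{repo_id}: not a valid model id (cf. the 404 rows for 'i' and 'M')")
    except HfHubHTTPError:
        # gemma-7b style 403: e.g. a fine-grained token without access to
        # public gated repositories.
        print(f"{repo_id}: accessible only with an authorized token")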
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,openai-community/gpt2-large,openai-community/gpt2-large,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.1,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1282.564096,2645.03296,0.0,1998.585856,1692.285952,s,10,0.2531576633453369,0.02531576633453369,0.002450285014570774,0.024608816146850586,0.025783482170104975,0.02917710142135619,0.031891996822357174,"[0.03257072067260742, 0.02457766342163086, 0.02456787109375, 0.024639968872070313, 0.023735296249389647, 0.02453727912902832, 0.02502934455871582, 0.024820192337036133, 0.024842975616455078, 0.02383635139465332]",tokens/s,10112.275355093076,kWh,2.792075205920154e-07,1.529921474871729e-07,8.369050561796469e-07,1.2691047242588353e-06,tokens/kWh,201717001.84120387,MB,1282.859008,2645.03296,0.0,1998.585856,1740.091904,s,10,14.224613037109373,1.4224613037109373,0.007999866488579442,1.4226959228515625,1.4288772583007814,1.4350930480957031,1.4400656799316405,"[1.441308837890625, 1.4159813232421874, 1.4227452392578126, 1.4274959716796876, 1.4234324951171875, 1.4254453125, 1.42061181640625, 1.4226466064453125, 1.410975341796875, 1.4139700927734375]",tokens/s,44.28942976209244,kWh,1.649391267739289e-05,9.038513698214438e-06,3.331084448422058e-05,5.884327085982791e-05,tokens/kWh,1070640.6880418654,,s,629,14.421040109634406,0.022926931811819394,0.002966066291432769,0.022564863204956053,0.022848736572265625,0.023248467254638677,0.047059558868408236,"[0.02345062446594238, 0.023438335418701172, 0.023334911346435547, 0.02593791961669922, 0.023631872177124022, 0.023432191848754884, 0.023536640167236327, 0.022730752944946288, 0.02284752082824707, 0.022558687210083007, 0.022576128005981445, 0.022565887451171874, 0.022806528091430665, 0.02365644836425781, 0.023848960876464844, 0.023938047409057618, 0.023386144638061525, 0.023298015594482423, 0.023143423080444335, 0.023015424728393553, 0.02310758399963379, 0.022743040084838868, 0.022743040084838868, 0.02262937545776367, 0.022384639739990234, 0.022402048110961914, 0.022543359756469726, 0.02267136001586914, 0.022543359756469726, 0.022590463638305663, 0.022413312911987306, 0.02251468849182129, 0.022372352600097657, 0.022536191940307617, 0.022580223083496095, 0.022455327987670897, 0.02253718376159668, 0.022559743881225586, 0.022518783569335937, 0.022353919982910156, 0.023468063354492186, 0.02273072052001953, 0.0225167350769043, 0.022666240692138673, 0.022587392807006838, 0.022557695388793944, 0.022419456481933595, 0.022979583740234375, 0.0226375675201416, 0.022574079513549804, 0.02263039970397949, 0.022649856567382814, 0.023013376235961915, 0.022584320068359375, 0.022502399444580077, 0.022550527572631835, 0.022541311264038084, 0.022582271575927734, 0.02264371109008789, 0.022633472442626954, 0.022517759323120116, 0.022525951385498046, 0.0473620491027832, 0.022525951385498046, 0.02267238426208496, 0.02266009521484375, 0.022524927139282228, 0.022616064071655274, 0.022587392807006838, 0.022130687713623046, 0.02282598304748535, 0.022890495300292968, 0.02264575958251953, 0.022579200744628908, 
0.022518783569335937, 0.022551551818847656, 0.02265088081359863, 0.02282700729370117, 0.022845439910888672, 0.02262937545776367, 0.022577152252197266, 0.022571008682250978, 0.02287615966796875, 0.02270515251159668, 0.022545408248901368, 0.022587392807006838, 0.02253004837036133, 0.02262118339538574, 0.022649856567382814, 0.022642688751220705, 0.02253926467895508, 0.022557695388793944, 0.02247065544128418, 0.021695487976074217, 0.0216494083404541, 0.021728256225585937, 0.02169343948364258, 0.021741567611694337, 0.02169753646850586, 0.021777408599853516, 0.0221265926361084, 0.02168832015991211, 0.021707775115966797, 0.021751808166503905, 0.022738943099975584, 0.022576128005981445, 0.02267750358581543, 0.02270412826538086, 0.022567935943603516, 0.022503423690795898, 0.022580223083496095, 0.022568960189819336, 0.022582271575927734, 0.02268876838684082, 0.02267955207824707, 0.022681600570678712, 0.022557695388793944, 0.02259660720825195, 0.022569984436035157, 0.02267033576965332, 0.022801408767700194, 0.02265292739868164, 0.02266111946105957, 0.022509567260742186, 0.022838272094726563, 0.047782913208007816, 0.022707199096679686, 0.022584320068359375, 0.022564863204956053, 0.022674432754516603, 0.022626304626464845, 0.022611967086791994, 0.0224849910736084, 0.02268262481689453, 0.022615039825439453, 0.02254643249511719, 0.022516767501831056, 0.022648799896240236, 0.022920192718505858, 0.02280243110656738, 0.022716415405273437, 0.02269696044921875, 0.022509567260742186, 0.02259660720825195, 0.02186240005493164, 0.02168524742126465, 0.02166988754272461, 0.02171494483947754, 0.021893119812011717, 0.021777408599853516, 0.022468608856201173, 0.022656000137329102, 0.022567935943603516, 0.022509567260742186, 0.02251468849182129, 0.022665216445922853, 0.022495231628417968, 0.022585344314575196, 0.022890495300292968, 0.022707199096679686, 0.022599679946899414, 0.0224901123046875, 0.022616064071655274, 0.022360063552856444, 0.021804031372070314, 0.023768064498901367, 0.023956480026245116, 0.02288128089904785, 0.022603776931762694, 0.0226713924407959, 0.023395296096801757, 0.022510591506958007, 0.02287718391418457, 0.02255462455749512, 0.022599679946899414, 0.022701055526733398, 0.022611967086791994, 0.022558719635009765, 0.022564863204956053, 0.022536191940307617, 0.022574079513549804, 0.022587392807006838, 0.022669343948364257, 0.022663135528564454, 0.022517759323120116, 0.022557695388793944, 0.022557695388793944, 0.02261299133300781, 0.047557632446289064, 0.022609920501708985, 0.02284339141845703, 0.022714399337768556, 0.022558687210083007, 0.02247065544128418, 0.022534143447875975, 0.02250752067565918, 0.0227061767578125, 0.022619136810302733, 0.02262937545776367, 0.022804479598999023, 0.022742015838623047, 0.022770687103271483, 0.022768640518188478, 0.022951936721801756, 0.02267852783203125, 0.022591487884521484, 0.022607872009277344, 0.022577152252197266, 0.022529024124145508, 0.02264678382873535, 0.022737920761108397, 0.022552576065063477, 0.02267136001586914, 0.022562816619873048, 0.022501375198364256, 0.022537216186523438, 0.022494207382202147, 0.02264678382873535, 0.02247987174987793, 0.022520832061767578, 0.022560768127441407, 0.02410700798034668, 0.02294988822937012, 0.022955007553100586, 0.022804479598999023, 0.022603776931762694, 0.022517759323120116, 0.02248806381225586, 0.022510591506958007, 0.022556671142578123, 0.022529024124145508, 0.022588415145874022, 0.022787071228027343, 0.022559743881225586, 0.0224901123046875, 0.022579200744628908, 0.02262118339538574, 0.022518783569335937, 
0.022634496688842775, 0.022610944747924806, 0.02249113655090332, 0.02265907287597656, 0.022619136810302733, 0.022478847503662108, 0.0225167350769043, 0.023174144744873046, 0.02269388771057129, 0.022603776931762694, 0.022579200744628908, 0.022594560623168947, 0.02265292739868164, 0.046281726837158206, 0.022588415145874022, 0.022619136810302733, 0.022642688751220705, 0.022658048629760744, 0.022820863723754883, 0.022597631454467772, 0.022796287536621093, 0.022675455093383787, 0.022595584869384764, 0.02252288055419922, 0.022564863204956053, 0.0224849910736084, 0.02255462455749512, 0.022748159408569335, 0.02264678382873535, 0.02248294448852539, 0.022684671401977538, 0.02248908805847168, 0.022733823776245117, 0.022589439392089843, 0.022492160797119142, 0.02248089599609375, 0.022501375198364256, 0.022565887451171874, 0.022586368560791017, 0.02262428855895996, 0.022853599548339844, 0.022603776931762694, 0.022559743881225586, 0.02252288055419922, 0.022573055267333983, 0.022579200744628908, 0.02252390480041504, 0.022502399444580077, 0.02264473533630371, 0.022536191940307617, 0.022897663116455077, 0.022585344314575196, 0.02255564880371094, 0.022405120849609376, 0.022467584609985353, 0.022635520935058592, 0.022593536376953126, 0.0225218563079834, 0.022452224731445314, 0.022436864852905275, 0.022429695129394533, 0.02269696044921875, 0.022748159408569335, 0.02262015914916992, 0.022977535247802734, 0.022610944747924806, 0.022665216445922853, 0.022550527572631835, 0.022545440673828125, 0.022626272201538088, 0.02249830436706543, 0.02253926467895508, 0.02264371109008789, 0.022418432235717774, 0.02254745674133301, 0.022605823516845702, 0.04739276885986328, 0.02246143913269043, 0.022681600570678712, 0.02274406433105469, 0.022584320068359375, 0.022503423690795898, 0.02266828727722168, 0.02253824043273926, 0.02254643249511719, 0.022562816619873048, 0.022585344314575196, 0.022560768127441407, 0.02247987174987793, 0.022607872009277344, 0.022597631454467772, 0.022610944747924806, 0.022567935943603516, 0.022563840866088865, 0.02281881523132324, 0.022563840866088865, 0.0224849910736084, 0.02272051239013672, 0.0226375675201416, 0.02251263999938965, 0.022478847503662108, 0.022413312911987306, 0.02251468849182129, 0.0224901123046875, 0.02249113655090332, 0.02434867286682129, 0.023407615661621094, 0.022523935317993165, 0.022494176864624023, 0.022559743881225586, 0.022478847503662108, 0.022961151123046874, 0.022486015319824217, 0.022556671142578123, 0.02263859176635742, 0.022518783569335937, 0.022518783569335937, 0.02246451187133789, 0.02289356803894043, 0.022587392807006838, 0.022529024124145508, 0.02253926467895508, 0.022483968734741212, 0.022623231887817383, 0.022536191940307617, 0.022718463897705078, 0.02246451187133789, 0.022972415924072266, 0.02262835121154785, 0.0225167350769043, 0.022565887451171874, 0.022534143447875975, 0.022614015579223632, 0.02248192024230957, 0.02244812774658203, 0.022526975631713866, 0.022551551818847656, 0.022509567260742186, 0.022915071487426757, 0.04775833511352539, 0.02229145622253418, 0.022557695388793944, 0.02253107261657715, 0.022478847503662108, 0.0224399356842041, 0.02260479927062988, 0.022494207382202147, 0.02253004837036133, 0.022792192459106447, 0.02268057632446289, 0.02263039970397949, 0.022606847763061523, 0.022649856567382814, 0.022525951385498046, 0.022591487884521484, 0.022524927139282228, 0.022495231628417968, 0.022508544921875, 0.02243174362182617, 0.02255564880371094, 0.022437887191772463, 0.02250547218322754, 0.022789119720458984, 0.022580223083496095, 
0.022500352859497072, 0.022552576065063477, 0.022320127487182616, 0.022432767868041992, 0.022403072357177735, 0.02246963119506836, 0.022459392547607423, 0.0224901123046875, 0.02248089599609375, 0.022550527572631835, 0.022467584609985353, 0.022548479080200197, 0.022475776672363282, 0.022477823257446287, 0.02252288055419922, 0.0224849910736084, 0.022486015319824217, 0.022814720153808594, 0.02265500831604004, 0.022472671508789063, 0.022571008682250978, 0.02265395164489746, 0.02249830436706543, 0.022722560882568358, 0.02262118339538574, 0.022388736724853517, 0.02170572853088379, 0.021612543106079102, 0.021695487976074217, 0.022331392288208008, 0.022533119201660155, 0.022584320068359375, 0.022502399444580077, 0.022580223083496095, 0.022626304626464845, 0.022617088317871094, 0.023888896942138672, 0.023900159835815428, 0.048435199737548826, 0.022572032928466795, 0.022586368560791017, 0.022606847763061523, 0.02252390480041504, 0.02247065544128418, 0.022392831802368163, 0.022492160797119142, 0.022527999877929687, 0.02246143913269043, 0.02269900894165039, 0.022972415924072266, 0.022939647674560547, 0.02273689651489258, 0.022567935943603516, 0.02269388771057129, 0.02290380859375, 0.022779903411865234, 0.022605823516845702, 0.022666240692138673, 0.02265497589111328, 0.022666240692138673, 0.02287308883666992, 0.022775808334350587, 0.022595584869384764, 0.02246963119506836, 0.0227061767578125, 0.022598655700683593, 0.02247987174987793, 0.0224532470703125, 0.02243174362182617, 0.022486015319824217, 0.022541343688964845, 0.022189023971557618, 0.021953535079956055, 0.02270515251159668, 0.022633472442626954, 0.02249625587463379, 0.022388736724853517, 0.022459392547607423, 0.022429695129394533, 0.022640640258789063, 0.02250444793701172, 0.022399999618530272, 0.022501375198364256, 0.022605823516845702, 0.0224399356842041, 0.022534143447875975, 0.02251468849182129, 0.02254745674133301, 0.022427648544311524, 0.0224716796875, 0.022389759063720704, 0.02249318313598633, 0.022563840866088865, 0.02271334457397461, 0.02255462455749512, 0.022589439392089843, 0.022642688751220705, 0.0231014404296875, 0.02254745674133301, 0.022579200744628908, 0.02269696044921875, 0.04771123123168945, 0.022592512130737305, 0.02264371109008789, 0.022725631713867187, 0.02244915199279785, 0.02268671989440918, 0.02250547218322754, 0.022475776672363282, 0.022564863204956053, 0.02251468849182129, 0.022571008682250978, 0.02249728012084961, 0.022548479080200197, 0.022580223083496095, 0.022772735595703125, 0.022789119720458984, 0.022508544921875, 0.022747135162353514, 0.022509567260742186, 0.022601728439331056, 0.022254592895507814, 0.022381568908691408, 0.022518783569335937, 0.02252288055419922, 0.022477823257446287, 0.02253209686279297, 0.02265292739868164, 0.02251571273803711, 0.022641664505004884, 0.0224532470703125, 0.022544384002685547, 0.022437887191772463, 0.02270207977294922, 0.022600704193115235, 0.022566911697387695, 0.02243071937561035, 0.022535167694091796, 0.02248089599609375, 0.02253824043273926, 0.022527999877929687, 0.02228531265258789, 0.02247987174987793, 0.022460416793823244, 0.02248806381225586, 0.022437887191772463, 0.02251571273803711, 0.022404096603393556, 0.022582271575927734, 0.02262118339538574, 0.02246143913269043, 0.022376447677612304, 0.02247270393371582, 0.021943296432495117, 0.02165862464904785, 0.021585920333862304, 0.02163609504699707, 0.021603328704833984, 0.021702655792236326, 0.021951488494873047, 0.02187980842590332, 0.021659648895263672, 0.02163609504699707, 0.021739519119262696, 0.045930496215820314, 
0.021621759414672852, 0.02165247917175293, 0.021627904891967774, 0.0216944637298584, 0.021584896087646483, 0.02169343948364258, 0.021748735427856446, 0.021755903244018555, 0.022071296691894532, 0.021841920852661133, 0.021811199188232423, 0.022022144317626953, 0.022518783569335937, 0.022567935943603516, 0.022632448196411133, 0.02263654327392578, 0.022412288665771486, 0.022459392547607423, 0.022587392807006838, 0.022642688751220705, 0.022559743881225586, 0.022419456481933595, 0.022595584869384764, 0.022737920761108397, 0.02244812774658203, 0.022764543533325195, 0.02286591911315918, 0.022866943359375, 0.022540288925170897, 0.022541311264038084, 0.022564863204956053, 0.02249932861328125, 0.022607872009277344, 0.02301644706726074, 0.02265292739868164, 0.02348646354675293, 0.02287718391418457, 0.022573055267333983, 0.022571008682250978, 0.023392255783081056, 0.02261299133300781, 0.022563840866088865, 0.02254745674133301, 0.022725631713867187, 0.022534143447875975, 0.022619136810302733, 0.022601728439331056, 0.022498336791992188, 0.02250441551208496, 0.02248192024230957, 0.0224901123046875, 0.022467584609985353, 0.022432767868041992, 0.022517759323120116, 0.022550527572631835, 0.022475776672363282, 0.0225218563079834, 0.02246963119506836, 0.02247987174987793, 0.022467584609985353, 0.022591487884521484, 0.022592512130737305]",tokens/s,43.61682619409526,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-70m,EleutherAI/pythia-70m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File 
""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm-20b,internlm/internlm-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 612, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 625, in resolve_trust_remote_code raise ValueError( ValueError: The repository for internlm/internlm-20b contains custom code which must be executed to 
correctly load the model. You can inspect the repository content at https://hf.co/internlm/internlm-20b. Please pass the argument `trust_remote_code=True` to allow custom code to be run. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,meta-llama/Meta-Llama-3-8B,meta-llama/Meta-Llama-3-8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-350m,facebook/opt-350m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-rw-1b,tiiuae/falcon-rw-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 612, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 625, in resolve_trust_remote_code raise ValueError( ValueError: The repository for tiiuae/falcon-rw-1b contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/tiiuae/falcon-rw-1b. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-125m,facebook/opt-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-4.5B,facebook/xglm-4.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,x,x,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/x/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a31f5-4e883990616e2ccf54c565ad;bc587309-baba-4f53-a3bf-3c3c62f68bd4) Repository Not Found for url: https://huggingface.co/x/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: x is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-3b-4e1t,stabilityai/stablelm-3b-4e1t,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-2-1_6b,stabilityai/stablelm-2-1_6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights 
self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-66b,facebook/opt-66b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File 
""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 162.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,tiiuae/falcon-180B,tiiuae/falcon-180B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/tiiuae/falcon-180B/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-66779481-334eb0556532a7bb14983151;dc79f4cd-5076-402a-9fb1-4dfc337e550a) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/tiiuae/falcon-180B/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like tiiuae/falcon-180B is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-1.3B,EleutherAI/gpt-neo-1.3B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-6.7b,EleutherAI/pythia-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-7B,Qwen/Qwen1.5-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.4b,EleutherAI/pythia-1.4b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-13b,facebook/opt-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,8,8,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/8/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-667a319e-3c81f2003e9426844b7d74a9;b94d8a64-7024-47f7-a202-835ea05be3ad) Repository Not Found for url: https://huggingface.co/8/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: 8 is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-72B,Qwen/Qwen-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 612, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During 
handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 625, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Qwen/Qwen-72B contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Qwen/Qwen-72B. Please pass the argument `trust_remote_code=True` to allow custom code to be run. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,databricks/dbrx-base,databricks/dbrx-base,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-667cbe56-42eff0351b9ff7b80c093ea7;6ca70f53-c8c1-499c-a0e9-d6d8b41611f2) 403 Forbidden: Please enable access to public gated repositories in your fine-grained token settings to view this repository.. Cannot access content at: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like databricks/dbrx-base is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,microsoft/rho-math-1b-v0.1,microsoft/rho-math-1b-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,openai-community/gpt2,openai-community/gpt2,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,,,MB,1235.7632,1005.060096,0.0,358.612992,318.913024,s,20,0.1788675851821899,0.008943379259109496,0.00026419798811655123,0.00887070369720459,0.00916225633621216,0.009274708795547485,0.009786717424392698,"[0.009914719581604003, 0.008858528137207031, 0.008864447593688965, 0.008830816268920898, 0.008876959800720214, 0.009153504371643067, 0.008886079788208007, 0.008735072135925294, 0.009109472274780274, 0.008775872230529785, 0.008850208282470703, 0.00901251220703125, 0.00899942398071289, 0.008742527961730957, 0.009241024017333984, 0.008729375839233399, 0.00890329647064209, 0.008699104309082032, 0.008877599716186523, 0.008807040214538575]",tokens/s,28624.526879953683,kWh,1.0405706355691948e-07,5.701826505582485e-08,2.244287160088168e-07,3.8550404462156115e-07,tokens/kWh,664065665.643815,MB,1235.7632,1005.060096,0.0,358.612992,328.809472,s,20,10.08869351196289,0.5044346755981446,0.005335005585423021,0.503564468383789,0.5124426330566406,0.5136576141357422,0.5166526654052734,"[0.5010412292480468, 0.5011974487304688, 0.4979539489746094, 0.5063039855957031, 0.5047576599121094, 0.5012839050292969, 0.49773388671875, 0.5053358764648438, 0.5038833312988281, 0.51085546875, 0.5134605712890625, 0.5174014282226562, 0.50324560546875, 0.5123295288085937, 0.50199462890625, 0.5043636169433594, 0.4986883239746094, 0.5073371276855468, 0.49996554565429685, 0.49956039428710936]",tokens/s,124.89228644976944,kWh,6.0475178527528624e-06,3.3125900223381058e-06,1.029085747399049e-05,1.9650965349081454e-05,tokens/kWh,3205949.3709780937,,s,1259,10.248980564594259,0.008140572330892985,0.0011351723397017978,0.007914495944976807,0.00819527702331543,0.008268185138702392,0.017085111923217773,"[0.008740960121154785, 0.008759200096130371, 0.00870195198059082, 0.009260031700134277, 0.008202239990234375, 0.00787660789489746, 0.00785203218460083, 0.007820288181304931, 0.00780083179473877, 0.007929855823516846, 0.007827455997467042, 0.007792640209197998, 0.007823359966278077, 0.007840767860412597, 0.007830527782440186, 0.007824384212493896, 0.007812096118927002, 0.007836671829223632, 0.007806975841522217, 0.007794688224792481, 0.007799808025360107, 0.007849984169006348, 0.007786496162414551, 0.007824384212493896, 0.007945216178894043, 0.007836671829223632, 0.007806975841522217, 0.007813119888305664, 0.008041472434997558, 0.007838719844818116, 0.007780352115631104, 0.007794688224792481, 0.007814144134521485, 0.007823359966278077, 0.007830527782440186, 0.00779366397857666, 0.007840767860412597, 0.00790937614440918, 0.008236031532287597, 0.00806604766845703, 0.008050687789916992, 0.008067071914672852, 0.00800972843170166, 0.007974912166595459, 0.00786636781692505, 0.007839744091033935, 0.00782643222808838, 0.007836671829223632, 0.007819263935089112, 0.007831552028656007, 0.007787519931793213, 0.007797760009765625, 0.007813119888305664, 0.007910399913787843, 
0.007830527782440186, 0.00781824016571045, 0.007817215919494629, 0.007847936153411865, 0.007822336196899414, 0.00779366397857666, 0.007809088230133056, 0.007972799777984619, 0.017083391189575196, 0.007821311950683594, 0.007833600044250488, 0.007803904056549072, 0.008130559921264649, 0.008444928169250488, 0.008322048187255859, 0.008384511947631837, 0.008393728256225585, 0.008424448013305665, 0.00838144016265869, 0.008119296073913575, 0.008051712036132813, 0.007872511863708496, 0.007865344047546387, 0.007855103969573975, 0.007837696075439453, 0.007867392063140868, 0.007815167903900147, 0.007829504013061523, 0.007813119888305664, 0.007836671829223632, 0.007914495944976807, 0.007887872219085693, 0.00783564805984497, 0.00791756820678711, 0.007890944004058837, 0.007857151985168457, 0.007817215919494629, 0.007907328128814697, 0.007833600044250488, 0.00782643222808838, 0.007804927825927735, 0.007836671829223632, 0.007812096118927002, 0.00781824016571045, 0.007804927825927735, 0.007844863891601562, 0.007870463848114014, 0.00782643222808838, 0.00781004810333252, 0.008635392189025879, 0.008225791931152344, 0.008044544219970704, 0.007888895988464355, 0.007927807807922363, 0.00791756820678711, 0.007895040035247802, 0.007888895988464355, 0.007903232097625732, 0.007906303882598878, 0.007899136066436767, 0.008059904098510743, 0.007930880069732665, 0.00791756820678711, 0.007890944004058837, 0.0078919677734375, 0.007902207851409913, 0.007917632102966308, 0.007903168201446533, 0.007907328128814697, 0.007885824203491211, 0.007904255867004394, 0.017123327255249024, 0.007914495944976807, 0.007899136066436767, 0.007911424160003662, 0.00787660789489746, 0.007897088050842285, 0.007896063804626464, 0.007951392173767089, 0.007910367965698242, 0.00790937614440918, 0.007902207851409913, 0.007904255867004394, 0.007924736022949219, 0.00791756820678711, 0.007886847972869874, 0.007912447929382324, 0.007911424160003662, 0.007878655910491944, 0.007873536109924317, 0.007907328128814697, 0.007894015789031983, 0.00790118408203125, 0.00790118408203125, 0.007882751941680909, 0.007897088050842285, 0.00790937614440918, 0.00790015983581543, 0.007895040035247802, 0.007877632141113282, 0.007913472175598145, 0.007868415832519531, 0.007877632141113282, 0.007882751941680909, 0.007887872219085693, 0.007899136066436767, 0.007874559879302979, 0.00788479995727539, 0.007873536109924317, 0.007944191932678223, 0.007880703926086426, 0.007899136066436767, 0.007952383995056152, 0.007964672088623047, 0.007881728172302246, 0.007886847972869874, 0.007874559879302979, 0.00789299201965332, 0.007895040035247802, 0.00787660789489746, 0.007916543960571289, 0.007904255867004394, 0.007903232097625732, 0.007869440078735352, 0.007903232097625732, 0.007887872219085693, 0.00796675205230713, 0.007912415981292725, 0.00786636781692505, 0.00789299201965332, 0.007875584125518798, 0.007924767971038818, 0.007874527931213378, 0.0078919677734375, 0.017052671432495118, 0.007898111820220948, 0.007918591976165772, 0.007899136066436767, 0.00785923194885254, 0.007882719993591308, 0.007894015789031983, 0.007855103969573975, 0.00787769603729248, 0.007874495983123779, 0.007897088050842285, 0.007960576057434082, 0.007932928085327149, 0.007918591976165772, 0.007887872219085693, 0.007918591976165772, 0.008210432052612305, 0.007874559879302979, 0.007902207851409913, 0.00789299201965332, 0.007839776039123536, 0.007864287853240967, 0.007882751941680909, 0.007897088050842285, 0.007990272045135497, 0.007927807807922363, 0.007896063804626464, 0.007938047885894776, 0.007918591976165772, 
0.008033280372619628, 0.008353792190551757, 0.008071167945861817, 0.007939072132110595, 0.007919616222381591, 0.007890944004058837, 0.007920639991760254, 0.008637439727783204, 0.00832921600341797, 0.008156160354614257, 0.00827187156677246, 0.008194047927856446, 0.008177663803100586, 0.008159232139587403, 0.008148032188415527, 0.00825443172454834, 0.008152031898498535, 0.008161279678344726, 0.008157183647155761, 0.008122367858886719, 0.008179712295532226, 0.008135680198669434, 0.008156160354614257, 0.008168448448181152, 0.008136704444885253, 0.008164352416992187, 0.008143872261047362, 0.008163328170776368, 0.008181759834289551, 0.0082227201461792, 0.008193023681640625, 0.007928832054138184, 0.00785920000076294, 0.007887872219085693, 0.017110015869140623, 0.007940095901489258, 0.007927807807922363, 0.007938047885894776, 0.007880703926086426, 0.007928832054138184, 0.007878655910491944, 0.007922688007354736, 0.007928832054138184, 0.007921664237976075, 0.007896063804626464, 0.007921728134155273, 0.007894976139068604, 0.007971839904785156, 0.007922688007354736, 0.007927807807922363, 0.007964672088623047, 0.00813158416748047, 0.008223744392395019, 0.008168448448181152, 0.008158207893371582, 0.008228863716125488, 0.008165375709533691, 0.008120320320129394, 0.00812342357635498, 0.008126432418823243, 0.008164352416992187, 0.008128512382507324, 0.008146944046020508, 0.00789299201965332, 0.007964672088623047, 0.007932928085327149, 0.00788479995727539, 0.007954432010650634, 0.007886847972869874, 0.007877632141113282, 0.007887872219085693, 0.007885824203491211, 0.00790835189819336, 0.00788479995727539, 0.00787660789489746, 0.007916543960571289, 0.007880703926086426, 0.007913472175598145, 0.007856128215789794, 0.007871488094329833, 0.007870463848114014, 0.007887872219085693, 0.007919616222381591, 0.007886847972869874, 0.007913472175598145, 0.008102911949157715, 0.008297472000122071, 0.008225824356079102, 0.008167391777038574, 0.008201215744018555, 0.008158207893371582, 0.008196096420288086, 0.008138751983642578, 0.008178751945495605, 0.008141759872436524, 0.008168448448181152, 0.008168448448181152, 0.017675264358520508, 0.008162303924560547, 0.008164352416992187, 0.008168448448181152, 0.007974912166595459, 0.007903232097625732, 0.007895040035247802, 0.007900191783905029, 0.007922656059265136, 0.007906303882598878, 0.00795136022567749, 0.007872511863708496, 0.008233983993530274, 0.008203264236450195, 0.008174592018127442, 0.007968768119812012, 0.007970816135406494, 0.00800767993927002, 0.007923711776733398, 0.00791756820678711, 0.007895040035247802, 0.007906303882598878, 0.007885824203491211, 0.007902207851409913, 0.007950335979461669, 0.007906303882598878, 0.00800153636932373, 0.008085503578186035, 0.007935999870300293, 0.00807423973083496, 0.007925759792327881, 0.007998464107513427, 0.007912447929382324, 0.007880703926086426, 0.007910431861877442, 0.007928800106048585, 0.007922688007354736, 0.00797388792037964, 0.007903232097625732, 0.007905280113220215, 0.007947264194488525, 0.007898111820220948, 0.007899136066436767, 0.00788479995727539, 0.007897088050842285, 0.007875584125518798, 0.007867392063140868, 0.00787660789489746, 0.007890944004058837, 0.007881728172302246, 0.007879680156707763, 0.007906303882598878, 0.007890944004058837, 0.007894015789031983, 0.0078919677734375, 0.00790835189819336, 0.007898111820220948, 0.007895040035247802, 0.007961631774902344, 0.007899104118347168, 0.00791756820678711, 0.007910399913787843, 0.008005632400512695, 0.017169408798217774, 0.0079267840385437, 
0.00794316816329956, 0.007887872219085693, 0.007921664237976075, 0.007984127998352051, 0.00790015983581543, 0.007987199783325195, 0.007899136066436767, 0.007883776187896728, 0.007895040035247802, 0.007885824203491211, 0.007896063804626464, 0.007905344009399413, 0.007891903877258301, 0.007874559879302979, 0.007882751941680909, 0.007895040035247802, 0.007906303882598878, 0.007896063804626464, 0.007887872219085693, 0.007865344047546387, 0.007896063804626464, 0.007923711776733398, 0.007896063804626464, 0.007872511863708496, 0.007886847972869874, 0.007873536109924317, 0.00788479995727539, 0.00788479995727539, 0.007883776187896728, 0.007895040035247802, 0.007899136066436767, 0.007889920234680176, 0.007888895988464355, 0.007883776187896728, 0.007887872219085693, 0.007912447929382324, 0.00789299201965332, 0.007896063804626464, 0.007897088050842285, 0.00789299201965332, 0.007904255867004394, 0.007873536109924317, 0.0078919677734375, 0.007883776187896728, 0.007934016227722168, 0.007873472213745117, 0.007871488094329833, 0.007897088050842285, 0.007870463848114014, 0.007869440078735352, 0.007874559879302979, 0.007904255867004394, 0.007865344047546387, 0.00790835189819336, 0.007881728172302246, 0.007898111820220948, 0.007878655910491944, 0.007904255867004394, 0.007894015789031983, 0.007880703926086426, 0.007881760120391846, 0.016962528228759766, 0.00789299201965332, 0.007874559879302979, 0.007890944004058837, 0.0078919677734375, 0.007897088050842285, 0.007905280113220215, 0.007899136066436767, 0.007896063804626464, 0.0078919677734375, 0.007912447929382324, 0.007894015789031983, 0.007956480026245117, 0.007898176193237304, 0.007912384033203126, 0.007885824203491211, 0.007932928085327149, 0.007882751941680909, 0.007877632141113282, 0.00787660789489746, 0.007870463848114014, 0.00789299201965332, 0.00787660789489746, 0.007877632141113282, 0.007886879920959473, 0.007880671977996826, 0.007911424160003662, 0.007899136066436767, 0.007886847972869874, 0.007935999870300293, 0.007873536109924317, 0.007868415832519531, 0.0078919677734375, 0.007889920234680176, 0.007879680156707763, 0.007983104228973388, 0.007993343830108643, 0.008654848098754882, 0.008453120231628418, 0.008224767684936523, 0.008190976142883302, 0.008196096420288086, 0.008163328170776368, 0.008144895553588867, 0.008160256385803222, 0.0081725435256958, 0.008152064323425292, 0.008176639556884765, 0.008184831619262695, 0.008145919799804687, 0.00812339210510254, 0.008157183647155761, 0.008146944046020508, 0.008195072174072266, 0.0081397762298584, 0.00821555233001709, 0.008168448448181152, 0.0081397762298584, 0.008179743766784668, 0.00813974380493164, 0.00812339210510254, 0.00800767993927002, 0.008005632400512695, 0.017589248657226563, 0.008153087615966797, 0.008118271827697754, 0.00813158416748047, 0.008112128257751466, 0.008121343612670898, 0.008136704444885253, 0.008134655952453614, 0.0081397762298584, 0.008102944374084473, 0.008115167617797852, 0.008144895553588867, 0.008137727737426758, 0.008156160354614257, 0.008138751983642578, 0.008175616264343261, 0.008162303924560547, 0.00818995189666748, 0.00819711971282959, 0.008226816177368163, 0.00820736026763916, 0.007906303882598878, 0.007905280113220215, 0.007881824016571046, 0.007879583835601807, 0.007870463848114014, 0.007897088050842285, 0.007874559879302979, 0.007897088050842285, 0.007871488094329833, 0.007896063804626464, 0.007914495944976807, 0.00790937614440918, 0.007870463848114014, 0.007983104228973388, 0.008044544219970704, 0.008096768379211425, 0.007906303882598878, 0.008112128257751466, 
0.007983104228973388, 0.007894015789031983, 0.007897088050842285, 0.007886847972869874, 0.007949312210083008, 0.008068096160888672, 0.007966720104217529, 0.007918591976165772, 0.007911424160003662, 0.007865344047546387, 0.00786742401123047, 0.007906271934509277, 0.007863296031951903, 0.007878687858581543, 0.007874527931213378, 0.007888895988464355, 0.007899136066436767, 0.00789299201965332, 0.007874559879302979, 0.007891007900238037, 0.007904191970825196, 0.007889920234680176, 0.007860223770141601, 0.007889920234680176, 0.016955392837524414, 0.007903232097625732, 0.007902207851409913, 0.007949312210083008, 0.008045568466186523, 0.008096768379211425, 0.008201215744018555, 0.008014847755432129, 0.007928832054138184, 0.007928832054138184, 0.007914495944976807, 0.007942143917083741, 0.00790937614440918, 0.00790835189819336, 0.007902207851409913, 0.007897088050842285, 0.007897119998931884, 0.007896031856536865, 0.00787660789489746, 0.007882751941680909, 0.007858176231384278, 0.008070143699645996, 0.007881728172302246, 0.007886847972869874, 0.008017919540405273, 0.008667136192321777, 0.008648703575134278, 0.009057279586791991, 0.008278016090393067, 0.008357888221740722, 0.008216575622558593, 0.008090656280517578, 0.008072159767150878, 0.008176639556884765, 0.008013824462890624, 0.008032256126403809, 0.008014847755432129, 0.008168448448181152, 0.008040448188781739, 0.008073216438293456, 0.008193023681640625, 0.008168448448181152, 0.008135680198669434, 0.008099840164184571, 0.008014847755432129, 0.008209407806396484, 0.008148991584777832, 0.00819916820526123, 0.008255488395690918, 0.008286208152770995, 0.008129535675048828, 0.008140800476074218, 0.0081397762298584, 0.008138751983642578, 0.008297472000122071, 0.008203264236450195, 0.008134655952453614, 0.008274944305419921, 0.008238080024719239, 0.008150015830993652, 0.008124416351318359, 0.008136768341064454, 0.008249279975891113, 0.017087488174438475, 0.007910399913787843, 0.007920639991760254, 0.008250368118286134, 0.008164352416992187, 0.008233983993530274, 0.008186880111694337, 0.008132608413696289, 0.008143872261047362, 0.008156160354614257, 0.008376352310180664, 0.008221664428710938, 0.008168448448181152, 0.008158207893371582, 0.008151040077209473, 0.008147968292236327, 0.008152064323425292, 0.008318976402282715, 0.008307711601257324, 0.008219648361206054, 0.00800153636932373, 0.008033280372619628, 0.008261631965637208, 0.00818380832672119, 0.008162303924560547, 0.008141823768615723, 0.007873536109924317, 0.007918591976165772, 0.007895040035247802, 0.007880703926086426, 0.008042495727539062, 0.008142848014831543, 0.008126463890075684, 0.008128512382507324, 0.008168448448181152, 0.008210432052612305, 0.008141823768615723, 0.00820736026763916, 0.008134655952453614, 0.00812339210510254, 0.008116224288940429, 0.00813158416748047, 0.008230912208557128, 0.008254464149475099, 0.008152064323425292, 0.008153087615966797, 0.008138751983642578, 0.008230912208557128, 0.008318976402282715, 0.008128512382507324, 0.008023039817810058, 0.008267775535583496, 0.008164352416992187, 0.008136704444885253, 0.008127488136291505, 0.00820736026763916, 0.008120320320129394, 0.008137727737426758, 0.00810905647277832, 0.008242176055908204, 0.00839782428741455, 0.008219648361206054, 0.0081397762298584, 0.017508352279663086, 0.008938495635986327, 0.008742912292480469, 0.00971059226989746, 0.008355839729309082, 0.008208383560180664, 0.008152064323425292, 0.008168448448181152, 0.008190976142883302, 0.008128512382507324, 0.008148991584777832, 0.00819200038909912, 
0.008155136108398438, 0.008122367858886719, 0.008112128257751466, 0.00818892765045166, 0.008202239990234375, 0.008152064323425292, 0.008154111862182617, 0.00818995189666748, 0.008156160354614257, 0.008177663803100586, 0.008173567771911621, 0.008204287528991699, 0.008194047927856446, 0.008203264236450195, 0.008143872261047362, 0.00818995189666748, 0.008177663803100586, 0.00820633602142334, 0.008037376403808593, 0.00800972843170166, 0.008042495727539062, 0.008101887702941894, 0.008113151550292968, 0.008145919799804687, 0.008156160354614257, 0.008130559921264649, 0.0081397762298584, 0.008161279678344726, 0.008170495986938477, 0.008151040077209473, 0.008163328170776368, 0.008194047927856446, 0.008196096420288086, 0.008153087615966797, 0.008142848014831543, 0.008136704444885253, 0.008156160354614257, 0.008150015830993652, 0.008145919799804687, 0.008147968292236327, 0.008154111862182617, 0.008148991584777832, 0.008158207893371582, 0.00813158416748047, 0.008259584426879883, 0.008144895553588867, 0.008218624114990235, 0.00830463981628418, 0.008120320320129394, 0.008150015830993652, 0.008175616264343261, 0.017464319229125978, 0.008178688049316407, 0.008190976142883302, 0.0081725435256958, 0.008135680198669434, 0.008138751983642578, 0.0081397762298584, 0.008038399696350097, 0.007894015789031983, 0.007888895988464355, 0.007879680156707763, 0.00788479995727539, 0.007920639991760254, 0.008181759834289551, 0.008130559921264649, 0.008185855865478516, 0.008140800476074218, 0.008155136108398438, 0.00813158416748047, 0.008122367858886719, 0.007890944004058837, 0.007875584125518798, 0.007867392063140868, 0.00787660789489746, 0.007881728172302246, 0.008043519973754883, 0.008003583908081055, 0.00809062385559082, 0.008116224288940429, 0.008218624114990235, 0.008130559921264649, 0.008170495986938477, 0.00818995189666748, 0.008052736282348634, 0.007889920234680176, 0.007963647842407226, 0.00790015983581543, 0.007899136066436767, 0.007959551811218261, 0.00790015983581543, 0.007911424160003662, 0.007858176231384278, 0.0078919677734375, 0.007912447929382324, 0.007858208179473877, 0.007885791778564454, 0.00790118408203125, 0.0079267840385437, 0.007885824203491211, 0.007836671829223632, 0.00790841579437256, 0.00794924783706665, 0.007862271785736084, 0.007860223770141601, 0.007871488094329833, 0.007906303882598878, 0.007912447929382324, 0.007873536109924317, 0.007865344047546387, 0.007861248016357422, 0.007913472175598145, 0.007860223770141601, 0.007880703926086426, 0.01722265625, 0.008181759834289551, 0.008146944046020508, 0.008179712295532226, 0.008160256385803222, 0.008201215744018555, 0.008342592239379883, 0.008229824066162109, 0.008120320320129394, 0.008130559921264649, 0.008120320320129394, 0.00821555233001709, 0.008196096420288086, 0.008161279678344726, 0.008143872261047362, 0.008142848014831543, 0.008159232139587403, 0.008179712295532226, 0.008135680198669434, 0.008193023681640625, 0.008152064323425292, 0.008148991584777832, 0.008143872261047362, 0.00818380832672119, 0.008122367858886719, 0.008177663803100586, 0.00818995189666748, 0.008096768379211425, 0.008077312469482421, 0.008087552070617676, 0.008060928344726562, 0.007846975803375244, 0.007794623851776123, 0.007839744091033935, 0.007877632141113282, 0.007870463848114014, 0.00796569585800171, 0.00821555233001709, 0.008161279678344726, 0.008137727737426758, 0.0081397762298584, 0.008158207893371582, 0.008185855865478516, 0.008094719886779785, 0.008176639556884765, 0.008166399955749512, 0.008152064323425292, 0.008129535675048828, 0.008145919799804687, 
0.008144895553588867, 0.008102911949157715, 0.008136704444885253, 0.008160256385803222, 0.008142848014831543, 0.008163328170776368, 0.008125503540039063, 0.00813152027130127, 0.008148991584777832, 0.00818995189666748, 0.008140800476074218, 0.008144895553588867, 0.008143872261047362, 0.008157183647155761, 0.017770496368408203, 0.008144895553588867, 0.008167424201965333, 0.008121343612670898, 0.008127488136291505, 0.008167424201965333, 0.008127488136291505, 0.00813366413116455, 0.008134655952453614, 0.008162272453308106, 0.008150015830993652, 0.008126463890075684, 0.00828006362915039, 0.00813158416748047, 0.00809267234802246, 0.00790118408203125, 0.00790015983581543, 0.007904255867004394, 0.007929855823516846, 0.007919616222381591, 0.007922688007354736, 0.007886847972869874, 0.007990272045135497, 0.007903232097625732, 0.00789299201965332, 0.008048640251159669, 0.007890944004058837, 0.007895040035247802, 0.007911424160003662, 0.007920639991760254, 0.007906303882598878, 0.007864319801330566, 0.007913472175598145, 0.00788479995727539, 0.007877632141113282, 0.007867392063140868, 0.00788479995727539, 0.007886847972869874, 0.00790118408203125, 0.00790118408203125, 0.00790937614440918, 0.007898111820220948, 0.007895040035247802, 0.007885824203491211, 0.007911424160003662, 0.007966720104217529, 0.007956480026245117, 0.007877632141113282, 0.007889920234680176, 0.007912447929382324, 0.007882751941680909, 0.007911424160003662, 0.007864319801330566, 0.007896063804626464, 0.007914527893066407, 0.007879648208618164, 0.00789299201965332, 0.007902207851409913, 0.007905280113220215, 0.007902207851409913, 0.007896063804626464, 0.007899136066436767, 0.00789299201965332, 0.016945152282714843, 0.007976960182189942, 0.007987199783325195, 0.007897088050842285, 0.007890944004058837, 0.007878655910491944, 0.007885824203491211, 0.008117247581481933, 0.008261631965637208, 0.008130592346191406, 0.008142815589904786, 0.00818073558807373, 0.008161279678344726, 0.008145919799804687, 0.008124447822570801, 0.0082390718460083, 0.008175616264343261, 0.008152064323425292, 0.008142848014831543, 0.008135680198669434, 0.008121343612670898, 0.008034303665161132, 0.008308735847473145, 0.008354816436767578, 0.008210432052612305, 0.00818892765045166, 0.00840499210357666, 0.008446975708007813, 0.008169471740722656, 0.00808448028564453, 0.007904255867004394, 0.007879680156707763, 0.007803904056549072, 0.007822336196899414, 0.007821311950683594, 0.007830527782440186, 0.00781824016571045, 0.00783564805984497, 0.007887872219085693, 0.00790937614440918, 0.007906303882598878, 0.007878655910491944, 0.007903232097625732, 0.00790118408203125, 0.007885824203491211, 0.007912447929382324, 0.007875584125518798, 0.007875584125518798, 0.00785920000076294, 0.007907328128814697, 0.007918591976165772, 0.007895040035247802, 0.007977983951568603, 0.007904255867004394, 0.007905280113220215, 0.007886911869049072, 0.007864255905151368, 0.00790118408203125, 0.007961599826812745, 0.007873536109924317, 0.007895040035247802, 0.007902207851409913, 0.007896063804626464, 0.017140735626220704, 0.007910399913787843, 0.007895040035247802, 0.007890944004058837, 0.007916543960571289, 0.007902207851409913, 0.00790937614440918, 0.007895040035247802, 0.007900191783905029, 0.007898079872131347, 0.007911424160003662, 0.007941120147705078, 0.007872511863708496, 0.007905280113220215, 0.007915520191192627, 0.00790835189819336, 0.007896063804626464, 0.007886847972869874, 0.007879744052886963, 0.007878592014312744, 0.007881728172302246, 0.007902207851409913, 0.00787660789489746, 
0.007873536109924317, 0.007888895988464355, 0.00790835189819336, 0.007879680156707763, 0.00790118408203125, 0.007902207851409913, 0.00790015983581543, 0.007922688007354736, 0.007886847972869874, 0.007983104228973388, 0.007858176231384278, 0.0077916159629821775, 0.007862271785736084, 0.00781824016571045, 0.007829504013061523, 0.0078438401222229, 0.007886847972869874, 0.00790835189819336, 0.007917600154876709, 0.007915487766265869, 0.007886847972869874, 0.007902207851409913, 0.008055808067321778, 0.008141823768615723, 0.008117247581481933, 0.008110079765319824, 0.008225791931152344, 0.007896063804626464, 0.007907328128814697, 0.007890944004058837, 0.007872511863708496, 0.0078919677734375, 0.007872511863708496, 0.007890944004058837, 0.007888895988464355, 0.00785920000076294, 0.007879680156707763, 0.007883776187896728, 0.007872511863708496, 0.007987199783325195, 0.01689091110229492, 0.007906271934509277, 0.007910399913787843, 0.007885824203491211, 0.007910399913787843, 0.008279040336608886, 0.008184831619262695, 0.008133631706237793, 0.008155136108398438, 0.008173567771911621, 0.008164352416992187, 0.008150015830993652, 0.008140864372253418, 0.008307647705078126, 0.008159232139587403, 0.008154111862182617, 0.00818380832672119, 0.008154111862182617, 0.008138751983642578, 0.008128512382507324, 0.008130559921264649, 0.008145919799804687, 0.008111104011535645, 0.008121343612670898, 0.008159328460693359, 0.008150943756103516, 0.008162303924560547, 0.008224767684936523, 0.008142848014831543, 0.008118271827697754, 0.008174592018127442, 0.008132608413696289, 0.008121343612670898, 0.008136704444885253, 0.008122367858886719, 0.008136735916137696, 0.008140768051147461, 0.008134655952453614, 0.008135680198669434, 0.008136704444885253, 0.008128512382507324, 0.008117247581481933, 0.007903232097625732, 0.007888895988464355, 0.007879680156707763, 0.007862271785736084, 0.007904255867004394, 0.007907328128814697, 0.007888895988464355, 0.007863296031951903, 0.00788479995727539, 0.007880703926086426, 0.007897088050842285, 0.00787660789489746, 0.007877632141113282, 0.007864319801330566, 0.007898111820220948, 0.007869440078735352, 0.008171520233154296, 0.007862271785736084, 0.007905280113220215, 0.00789299201965332, 0.007885824203491211, 0.017105920791625977, 0.007873536109924317, 0.007889920234680176, 0.007930880069732665, 0.00790015983581543, 0.007897088050842285, 0.0078919677734375, 0.007902207851409913, 0.007905280113220215, 0.007921664237976075, 0.007916543960571289, 0.00790015983581543, 0.0078919677734375, 0.007914495944976807, 0.007897088050842285, 0.007927807807922363, 0.007922688007354736, 0.007899136066436767, 0.007882751941680909, 0.007895040035247802, 0.007905280113220215, 0.007902207851409913, 0.007878655910491944, 0.007896063804626464, 0.007888895988464355, 0.007855103969573975, 0.007883776187896728, 0.007883776187896728, 0.007855103969573975, 0.007872511863708496, 0.007853055953979492, 0.007902207851409913, 0.007882751941680909, 0.007883776187896728, 0.007874559879302979, 0.007872511863708496, 0.007898111820220948, 0.007904255867004394, 0.008046591758728027, 0.007898111820220948, 0.007877632141113282, 0.007856192111968995, 0.007872447967529297, 0.00787660789489746, 0.007890944004058837, 0.007873536109924317, 0.007888927936553955, 0.007879648208618164, 0.007868415832519531, 0.007879680156707763, 0.0078919677734375, 0.007882751941680909, 0.007896063804626464, 0.007927807807922363, 0.008242176055908204, 0.008209440231323242, 0.008128479957580566, 0.008130559921264649, 0.008132608413696289, 
0.008170495986938477, 0.008165375709533691, 0.008122367858886719, 0.008122367858886719, 0.017102848052978514, 0.007897088050842285, 0.008235008239746093, 0.008123456001281738, 0.00811308765411377, 0.008124416351318359, 0.008163328170776368, 0.008134655952453614, 0.008124416351318359, 0.007883776187896728, 0.007894015789031983, 0.00789299201965332, 0.007903232097625732, 0.007879680156707763, 0.007888895988464355, 0.007906303882598878, 0.007946239948272706, 0.007930880069732665, 0.007925759792327881, 0.00787052822113037, 0.007870399951934814, 0.007874559879302979, 0.007879680156707763, 0.00790118408203125, 0.007874559879302979, 0.007903232097625732, 0.007858176231384278, 0.007882751941680909, 0.00785920000076294, 0.007887872219085693, 0.007886847972869874, 0.007887872219085693, 0.007857151985168457, 0.007988224029541016, 0.007899136066436767, 0.007855103969573975, 0.007875584125518798, 0.007875584125518798, 0.007878655910491944, 0.007942143917083741, 0.007910399913787843, 0.007874591827392578, 0.007892000198364257, 0.007928768157958984, 0.007923711776733398, 0.007873536109924317, 0.007880703926086426, 0.007872511863708496, 0.007953407764434815, 0.007883776187896728, 0.007881728172302246, 0.007890944004058837, 0.007899136066436767, 0.007879680156707763, 0.007898111820220948, 0.007873568058013916, 0.007885791778564454, 0.007894015789031983, 0.00795750379562378, 0.00790835189819336, 0.007937024116516114, 0.007947264194488525, 0.00787667179107666]",tokens/s,122.84148575218228,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,togethercomputer/RedPajama-INCITE-Base-7B-v0.1,togethercomputer/RedPajama-INCITE-Base-7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return 
model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-30b,huggyllama/llama-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,togethercomputer/RedPajama-INCITE-Base-3B-v1,togethercomputer/RedPajama-INCITE-Base-3B-v1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in 
post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,mistralai/Mixtral-8x7B-v0.1,mistralai/Mixtral-8x7B-v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply 
self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 20.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,r,r,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1024, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/r/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-667a302f-0364a8382f04bc3f18084b88;ee2548e4-8031-41b0-aac4-007ab7950484) Repository Not Found for url: https://huggingface.co/r/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: r is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-12b,EleutherAI/pythia-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise 
ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-30b,facebook/opt-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File 
""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-564M,facebook/xglm-564M,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model 
= self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,TencentARC/Mistral_Pro_8B_v0.1,TencentARC/Mistral_Pro_8B_v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File 
""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,stabilityai/stablelm-base-alpha-7b,stabilityai/stablelm-base-alpha-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = 
post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-6B,01-ai/Yi-6B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.42.1,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 564, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3907, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 466, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init 
submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-14B,Qwen/Qwen1.5-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.1+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm2-20b,internlm/internlm2-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,False,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.218-208.862.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.2,,0.31.0,,,,1.20.0,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 612, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 625, in resolve_trust_remote_code raise ValueError( ValueError: The repository for internlm/internlm2-20b contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/internlm/internlm2-20b. Please pass the argument `trust_remote_code=True` to allow custom code to be run. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1