diff --git "a/huggingface_optimum-benchmark.txt" "b/huggingface_optimum-benchmark.txt" new file mode 100644--- /dev/null +++ "b/huggingface_optimum-benchmark.txt" @@ -0,0 +1,5681 @@ +# File: optimum-benchmark-main/llm_perf/update_llm_perf_cpu_pytorch.py +import os +import traceback +from itertools import product +from logging import getLogger +from llm_perf.utils import CANONICAL_PRETRAINED_OPEN_LLM_LIST, GENERATE_KWARGS, INPUT_SHAPES, OPEN_LLM_LIST, PRETRAINED_OPEN_LLM_LIST, is_benchmark_conducted +from optimum_benchmark import Benchmark, BenchmarkConfig, BenchmarkReport, InferenceConfig, ProcessConfig, PyTorchConfig +from optimum_benchmark.logging_utils import setup_logging +SUBSET = os.getenv('SUBSET', None) +MACHINE = os.getenv('MACHINE', None) +BACKEND = 'pytorch' +HARDWARE = 'cpu' +if os.getenv('MACHINE', None) is None and os.getenv('SUBSET', None) is None: + PUSH_REPO_ID = f'optimum-benchmark/llm-perf-{BACKEND}-{HARDWARE}-debug' + CANONICAL_PRETRAINED_OPEN_LLM_LIST = ['gpt2'] + SUBSET = 'unquantized' +elif os.getenv('MACHINE', None) is not None and os.getenv('SUBSET', None) is not None: + PUSH_REPO_ID = f'optimum-benchmark/llm-perf-{BACKEND}-{HARDWARE}-{SUBSET}-{MACHINE}' +else: + raise ValueError('Either both MACHINE and SUBSET should be set for benchmarking or neither for debugging') +ATTENTION_CONFIGS = ['eager', 'sdpa'] +if SUBSET == 'unquantized': + WEIGHTS_CONFIGS = {'float32': {'torch_dtype': 'float32', 'quant_scheme': None, 'quant_config': {}}, 'float16': {'torch_dtype': 'float16', 'quant_scheme': None, 'quant_config': {}}, 'bfloat16': {'torch_dtype': 'bfloat16', 'quant_scheme': None, 'quant_config': {}}} +else: + raise ValueError(f'Subset {SUBSET} not supported') +LOGGER = getLogger('llm-perf-backend') +LOGGER.info(f'len(OPEN_LLM_LIST): {len(OPEN_LLM_LIST)}') +LOGGER.info(f'len(PRETRAINED_OPEN_LLM_LIST): {len(PRETRAINED_OPEN_LLM_LIST)}') +LOGGER.info(f'len(CANONICAL_PRETRAINED_OPEN_LLM_LIST): {len(CANONICAL_PRETRAINED_OPEN_LLM_LIST)}') + +def is_benchmark_supported(weights_config, attn_implementation, hardware): + if attn_implementation == 'flash_attention_2': + return False + return True + +def benchmark_cpu_pytorch(model, attn_implementation, weights_config): + benchmark_name = f'{weights_config}-{attn_implementation}-{BACKEND}' + subfolder = f"{benchmark_name}/{model.replace('/', '--')}" + torch_dtype = WEIGHTS_CONFIGS[weights_config]['torch_dtype'] + quant_scheme = WEIGHTS_CONFIGS[weights_config]['quant_scheme'] + quant_config = WEIGHTS_CONFIGS[weights_config]['quant_config'] + if not is_benchmark_supported(weights_config, attn_implementation, HARDWARE): + LOGGER.info(f'Skipping benchmark {benchmark_name} with model {model} since it is not supported') + return + if is_benchmark_conducted(PUSH_REPO_ID, subfolder): + LOGGER.info(f'Skipping benchmark {benchmark_name} with model {model} since it was already conducted') + return + launcher_config = ProcessConfig() + scenario_config = InferenceConfig(memory=True, energy=True, latency=True, duration=10, iterations=10, warmup_runs=10, input_shapes=INPUT_SHAPES, generate_kwargs=GENERATE_KWARGS) + backend_config = PyTorchConfig(model=model, device='cpu', no_weights=True, library='transformers', task='text-generation', torch_dtype=torch_dtype, quantization_scheme=quant_scheme, quantization_config=quant_config, attn_implementation=attn_implementation, model_kwargs={'trust_remote_code': True}) + benchmark_config = BenchmarkConfig(name=benchmark_name, scenario=scenario_config, launcher=launcher_config, backend=backend_config) + 
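# NOTE (editorial): the run assembled above has three parts -- a launcher (ProcessConfig,
# which isolates the run in its own process), a scenario (InferenceConfig, which drives
# warmup/iterations and records latency, memory and energy) and a backend (PyTorchConfig,
# which describes how the model is loaded). A minimal sketch of the same composition,
# using only classes imported at the top of this file (the values below are illustrative,
# not taken from this script):
#
#     config = BenchmarkConfig(
#         name='debug',
#         launcher=ProcessConfig(),
#         scenario=InferenceConfig(latency=True, memory=True),
#         backend=PyTorchConfig(model='gpt2', device='cpu', no_weights=True),
#     )
#     report = Benchmark.launch(config)
#
# The config itself is pushed to the Hub below before launching, so even failed runs
# leave a traceable record next to the traceback report pushed in the except branch.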
benchmark_config.push_to_hub(repo_id=PUSH_REPO_ID, subfolder=subfolder, private=True) + try: + LOGGER.info(f'Running benchmark {benchmark_name} with model {model}') + benchmark_report = Benchmark.launch(benchmark_config) + benchmark_report.push_to_hub(repo_id=PUSH_REPO_ID, subfolder=subfolder, private=True) + benchmark = Benchmark(config=benchmark_config, report=benchmark_report) + benchmark.push_to_hub(repo_id=PUSH_REPO_ID, subfolder=subfolder, private=True) + except Exception: + LOGGER.error(f'Benchmark {benchmark_name} failed with model {model}') + benchmark_report = BenchmarkReport.from_dict({'traceback': traceback.format_exc()}) + benchmark_report.push_to_hub(repo_id=PUSH_REPO_ID, subfolder=subfolder, private=True) + benchmark = Benchmark(config=benchmark_config, report=benchmark_report) + benchmark.push_to_hub(repo_id=PUSH_REPO_ID, subfolder=subfolder, private=True) +if __name__ == '__main__': + os.environ['LOG_TO_FILE'] = '0' + os.environ['LOG_LEVEL'] = 'INFO' + setup_logging(level='INFO', prefix='MAIN-PROCESS') + models_attentions_weights = list(product(CANONICAL_PRETRAINED_OPEN_LLM_LIST, ATTENTION_CONFIGS, WEIGHTS_CONFIGS.keys())) + LOGGER.info(f'Running a total of {len(models_attentions_weights)} benchmarks, with {len(CANONICAL_PRETRAINED_OPEN_LLM_LIST)} models, {len(ATTENTION_CONFIGS)} attentions implementations and {len(WEIGHTS_CONFIGS)} weights configurations.') + for (model, attn_implementation, weights_config) in models_attentions_weights: + benchmark_cpu_pytorch(model, attn_implementation, weights_config) + +# File: optimum-benchmark-main/llm_perf/update_llm_perf_cuda_pytorch.py +import os +import traceback +from itertools import product +from logging import getLogger +from llm_perf.utils import CANONICAL_PRETRAINED_OPEN_LLM_LIST, GENERATE_KWARGS, INPUT_SHAPES, OPEN_LLM_LIST, PRETRAINED_OPEN_LLM_LIST, is_benchmark_conducted +from optimum_benchmark import Benchmark, BenchmarkConfig, BenchmarkReport, InferenceConfig, ProcessConfig, PyTorchConfig +from optimum_benchmark.logging_utils import setup_logging +SUBSET = os.getenv('SUBSET', None) +MACHINE = os.getenv('MACHINE', None) +if os.getenv('MACHINE', None) is None and os.getenv('SUBSET', None) is None: + PUSH_REPO_ID = 'optimum-benchmark/llm-perf-pytorch-cuda-debug' + CANONICAL_PRETRAINED_OPEN_LLM_LIST = ['gpt2'] + SUBSET = 'unquantized' +elif os.getenv('MACHINE', None) is not None and os.getenv('SUBSET', None) is not None: + PUSH_REPO_ID = f'optimum-benchmark/llm-perf-pytorch-cuda-{SUBSET}-{MACHINE}' +else: + raise ValueError('Either both MACHINE and SUBSET should be set for benchmarking or neither for debugging') +ATTENTION_CONFIGS = ['eager', 'sdpa', 'flash_attention_2'] +if SUBSET == 'unquantized': + WEIGHTS_CONFIGS = {'float32': {'torch_dtype': 'float32', 'quant_scheme': None, 'quant_config': {}}, 'float16': {'torch_dtype': 'float16', 'quant_scheme': None, 'quant_config': {}}, 'bfloat16': {'torch_dtype': 'bfloat16', 'quant_scheme': None, 'quant_config': {}}} +elif SUBSET == 'bnb': + WEIGHTS_CONFIGS = {'4bit-bnb': {'torch_dtype': 'float16', 'quant_scheme': 'bnb', 'quant_config': {'load_in_4bit': True}}, '8bit-bnb': {'torch_dtype': 'float16', 'quant_scheme': 'bnb', 'quant_config': {'load_in_8bit': True}}} +elif SUBSET == 'gptq': + WEIGHTS_CONFIGS = {'4bit-gptq-exllama-v1': {'quant_scheme': 'gptq', 'torch_dtype': 'float16', 'quant_config': {'bits': 4, 'use_exllama ': True, 'version': 1, 'model_seqlen': 256}}, '4bit-gptq-exllama-v2': {'torch_dtype': 'float16', 'quant_scheme': 'gptq', 'quant_config': {'bits': 4, 'use_exllama 
': True, 'version': 2, 'model_seqlen': 256}}} +elif SUBSET == 'awq': + WEIGHTS_CONFIGS = {'4bit-awq-gemm': {'torch_dtype': 'float16', 'quant_scheme': 'awq', 'quant_config': {'bits': 4, 'version': 'gemm'}}, '4bit-awq-gemv': {'torch_dtype': 'float16', 'quant_scheme': 'awq', 'quant_config': {'bits': 4, 'version': 'gemv'}}, '4bit-awq-exllama-v1': {'torch_dtype': 'float16', 'quant_scheme': 'awq', 'quant_config': {'bits': 4, 'version': 'exllama', 'exllama_config': {'version': 1, 'max_input_len': 64, 'max_batch_size': 1}}}, '4bit-awq-exllama-v2': {'torch_dtype': 'float16', 'quant_scheme': 'awq', 'quant_config': {'bits': 4, 'version': 'exllama', 'exllama_config': {'version': 2, 'max_input_len': 64, 'max_batch_size': 1}}}} +LOGGER = getLogger('llm-perf-backend') +LOGGER.info(f'len(OPEN_LLM_LIST): {len(OPEN_LLM_LIST)}') +LOGGER.info(f'len(PRETRAINED_OPEN_LLM_LIST): {len(PRETRAINED_OPEN_LLM_LIST)}') +LOGGER.info(f'len(CANONICAL_PRETRAINED_OPEN_LLM_LIST): {len(CANONICAL_PRETRAINED_OPEN_LLM_LIST)}') + +def is_benchmark_supported(weights_config, attn_implementation): + if attn_implementation == 'flash_attention_2' and weights_config == 'float32': + return False + return True + +def benchmark_cuda_pytorch(model, attn_implementation, weights_config): + benchmark_name = f'{weights_config}-{attn_implementation}' + subfolder = f"{benchmark_name}/{model.replace('/', '--')}" + torch_dtype = WEIGHTS_CONFIGS[weights_config]['torch_dtype'] + quant_scheme = WEIGHTS_CONFIGS[weights_config]['quant_scheme'] + quant_config = WEIGHTS_CONFIGS[weights_config]['quant_config'] + if not is_benchmark_supported(weights_config, attn_implementation): + LOGGER.info(f'Skipping benchmark {benchmark_name} with model {model} since it is not supported') + return + if is_benchmark_conducted(PUSH_REPO_ID, subfolder): + LOGGER.info(f'Skipping benchmark {benchmark_name} with model {model} since it was already conducted') + return + launcher_config = ProcessConfig(device_isolation=True, device_isolation_action='kill') + scenario_config = InferenceConfig(memory=True, energy=True, latency=True, duration=10, iterations=10, warmup_runs=10, input_shapes=INPUT_SHAPES, generate_kwargs=GENERATE_KWARGS) + backend_config = PyTorchConfig(model=model, device='cuda', device_ids='0', no_weights=True, library='transformers', task='text-generation', torch_dtype=torch_dtype, quantization_scheme=quant_scheme, quantization_config=quant_config, attn_implementation=attn_implementation, model_kwargs={'trust_remote_code': True}) + benchmark_config = BenchmarkConfig(name=benchmark_name, scenario=scenario_config, launcher=launcher_config, backend=backend_config) + benchmark_config.push_to_hub(repo_id=PUSH_REPO_ID, subfolder=subfolder, private=True) + try: + LOGGER.info(f'Running benchmark {benchmark_name} with model {model}') + benchmark_report = Benchmark.launch(benchmark_config) + benchmark_report.push_to_hub(repo_id=PUSH_REPO_ID, subfolder=subfolder, private=True) + benchmark = Benchmark(config=benchmark_config, report=benchmark_report) + benchmark.push_to_hub(repo_id=PUSH_REPO_ID, subfolder=subfolder, private=True) + except Exception: + LOGGER.error(f'Benchmark {benchmark_name} failed with model {model}') + benchmark_report = BenchmarkReport.from_dict({'traceback': traceback.format_exc()}) + benchmark_report.push_to_hub(repo_id=PUSH_REPO_ID, subfolder=subfolder, private=True) + benchmark = Benchmark(config=benchmark_config, report=benchmark_report) + benchmark.push_to_hub(repo_id=PUSH_REPO_ID, subfolder=subfolder, private=True) +if __name__ == '__main__': + 
os.environ['LOG_TO_FILE'] = '0' + os.environ['LOG_LEVEL'] = 'INFO' + setup_logging(level='INFO', prefix='MAIN-PROCESS') + models_attentions_weights = list(product(CANONICAL_PRETRAINED_OPEN_LLM_LIST, ATTENTION_CONFIGS, WEIGHTS_CONFIGS.keys())) + LOGGER.info(f'Running a total of {len(models_attentions_weights)} benchmarks, with {len(CANONICAL_PRETRAINED_OPEN_LLM_LIST)} models, {len(ATTENTION_CONFIGS)} attentions implementations and {len(WEIGHTS_CONFIGS)} weights configurations.') + for (model, attn_implementation, weights_config) in models_attentions_weights: + benchmark_cuda_pytorch(model, attn_implementation, weights_config) + +# File: optimum-benchmark-main/llm_perf/update_llm_perf_leaderboard.py +import subprocess +from glob import glob +import pandas as pd +from huggingface_hub import create_repo, snapshot_download, upload_file +from tqdm import tqdm +from optimum_benchmark import Benchmark +REPO_TYPE = 'dataset' +MAIN_REPO_ID = 'optimum-benchmark/llm-perf-leaderboard' +PERF_REPO_ID = 'optimum-benchmark/llm-perf-{backend}-{hardware}-{subset}-{machine}' +PERF_DF = 'perf-df-{subset}-{machine}.csv' +LLM_DF = 'llm-df.csv' + +def gather_benchmarks(subset: str, machine: str, backend: str, hardware: str): + perf_repo_id = PERF_REPO_ID.format(subset=subset, machine=machine, backend=backend, hardware=hardware) + snapshot = snapshot_download(repo_type=REPO_TYPE, repo_id=perf_repo_id, allow_patterns=['**/benchmark.json']) + dfs = [] + for file in tqdm(glob(f'{snapshot}/**/benchmark.json', recursive=True)): + dfs.append(Benchmark.from_json(file).to_dataframe()) + benchmarks = pd.concat(dfs, ignore_index=True) + perf_df = PERF_DF.format(subset=subset, machine=machine) + benchmarks.to_csv(perf_df, index=False) + create_repo(repo_id=MAIN_REPO_ID, repo_type=REPO_TYPE, private=False, exist_ok=True) + upload_file(repo_id=MAIN_REPO_ID, repo_type=REPO_TYPE, path_in_repo=perf_df, path_or_fileobj=perf_df) + +def update_perf_dfs(): + for machine in ['1xA10', '1xA100', '1xT4', '32vCPU-C7i']: + for backend in ['pytorch']: + for hardware in ['cuda', 'cpu']: + for subset in ['unquantized', 'bnb', 'awq', 'gptq']: + try: + gather_benchmarks(subset, machine, backend, hardware) + except Exception: + print(f'benchmark for subset: {subset}, machine: {machine}, backend: {backend}, hardware: {hardware} not found') +scrapping_script = '\ngit clone https://github.com/Weyaxi/scrape-open-llm-leaderboard.git\npip install -r scrape-open-llm-leaderboard/requirements.txt\npython scrape-open-llm-leaderboard/main.py\nrm -rf scrape-open-llm-leaderboard\n' + +def update_llm_df(): + subprocess.run(scrapping_script, shell=True) + create_repo(repo_id=MAIN_REPO_ID, repo_type=REPO_TYPE, exist_ok=True, private=False) + upload_file(repo_id=MAIN_REPO_ID, repo_type=REPO_TYPE, path_in_repo=LLM_DF, path_or_fileobj='open-llm-leaderboard.csv') +if __name__ == '__main__': + update_llm_df() + update_perf_dfs() + +# File: optimum-benchmark-main/llm_perf/utils.py +import pandas as pd +from optimum_benchmark.benchmark.report import BenchmarkReport +INPUT_SHAPES = {'batch_size': 1, 'sequence_length': 256} +GENERATE_KWARGS = {'max_new_tokens': 64, 'min_new_tokens': 64} +OPEN_LLM_LEADERBOARD = pd.read_csv('hf://datasets/optimum-benchmark/llm-perf-leaderboard/llm-df.csv') +OPEN_LLM_LIST = OPEN_LLM_LEADERBOARD.drop_duplicates(subset=['Model'])['Model'].tolist() +PRETRAINED_OPEN_LLM_LIST = OPEN_LLM_LEADERBOARD[OPEN_LLM_LEADERBOARD['Type'] == 'pretrained'].drop_duplicates(subset=['Model'])['Model'].tolist() +CANONICAL_PRETRAINED_OPEN_LLM_LIST = 
['01-ai/Yi-6B', '01-ai/Yi-34B', 'Deci/DeciLM-7B', 'Deci/DeciCoder-1b', 'EleutherAI/gpt-j-6b', 'EleutherAI/gpt-neo-1.3B', 'EleutherAI/gpt-neo-125m', 'EleutherAI/gpt-neo-2.7B', 'EleutherAI/gpt-neox-20b', 'EleutherAI/polyglot-ko-12.8b', 'EleutherAI/pythia-1.3b', 'EleutherAI/pythia-1.4b', 'EleutherAI/pythia-12b', 'EleutherAI/pythia-160m', 'EleutherAI/pythia-2.7b', 'EleutherAI/pythia-410m', 'EleutherAI/pythia-6.7b', 'EleutherAI/pythia-70m', 'Qwen/Qwen-7B', 'Qwen/Qwen-14B', 'Qwen/Qwen-72B', 'Qwen/Qwen1.5-0.5B', 'Qwen/Qwen1.5-1.8B', 'Qwen/Qwen1.5-4B', 'Qwen/Qwen1.5-7B', 'Qwen/Qwen1.5-14B', 'Qwen/Qwen1.5-32B', 'Qwen/Qwen1.5-72B', 'Qwen/Qwen1.5-110B', 'Qwen/Qwen1.5-MoE-A2.7B', 'Qwen/Qwen2-beta-14B', 'Qwen/Qwen2-beta-72B', 'Salesforce/codegen-6B-nl', 'Salesforce/codegen-16B-nl', 'TencentARC/Mistral_Pro_8B_v0.1', 'databricks/dbrx-base', 'facebook/opt-125m', 'facebook/opt-350m', 'facebook/opt-2.7b', 'facebook/opt-6.7b', 'facebook/opt-13b', 'facebook/opt-30b', 'facebook/opt-66b', 'facebook/xglm-564M', 'facebook/xglm-4.5B', 'facebook/xglm-7.5B', 'google/gemma-2b', 'google/gemma-7b', 'google/recurrentgemma-2b', 'google/recurrentgemma-7b', 'internlm/internlm-20b', 'internlm/internlm2-20b', 'huggyllama/llama-7b', 'huggyllama/llama-13b', 'huggyllama/llama-30b', 'huggyllama/llama-65b', 'meta-llama/Llama-2-7b-hf', 'meta-llama/Llama-2-13b-hf', 'meta-llama/Llama-2-70b-hf', 'meta-llama/Meta-Llama-3-8B', 'meta-llama/Meta-Llama-3-70B', 'microsoft/phi-1_5', 'microsoft/rho-math-1b-v0.1', 'mistralai/Mistral-7B-v0.1', 'mistralai/Mixtral-8x7B-v0.1', 'mistralai/Mixtral-8x22B-v0.1', 'openai-community/gpt2', 'openai-community/gpt2-large', 'stabilityai/stablelm-3b-4e1t', 'stabilityai/stablelm-2-1_6b', 'stabilityai/stablelm-2-12b', 'stabilityai/stablelm-base-alpha-3b', 'stabilityai/stablelm-base-alpha-7b', 'tiiuae/falcon-rw-1b', 'tiiuae/falcon-7b', 'tiiuae/falcon-40b', 'tiiuae/falcon-180B', 'togethercomputer/RedPajama-INCITE-Base-3B-v1', 'togethercomputer/RedPajama-INCITE-Base-7B-v0.1'] + +def is_benchmark_conducted(push_repo_id, subfolder): + try: + report = BenchmarkReport.from_pretrained(repo_id=push_repo_id, subfolder=subfolder) + if 'traceback' in report.to_dict(): + return False + else: + return True + except Exception: + return False + +# File: optimum-benchmark-main/optimum_benchmark/__init__.py +from .backends import BackendConfig, INCConfig, IPEXConfig, LlamaCppConfig, LLMSwarmConfig, ORTConfig, OVConfig, PyTorchConfig, PyTXIConfig, TorchORTConfig, TRTLLMConfig, VLLMConfig +from .benchmark.base import Benchmark +from .benchmark.config import BenchmarkConfig +from .benchmark.report import BenchmarkReport +from .launchers import InlineConfig, LauncherConfig, ProcessConfig, TorchrunConfig +from .scenarios import EnergyStarConfig, InferenceConfig, ScenarioConfig, TrainingConfig +__all__ = ['BackendConfig', 'Benchmark', 'BenchmarkConfig', 'BenchmarkReport', 'EnergyStarConfig', 'InferenceConfig', 'IPEXConfig', 'INCConfig', 'InlineConfig', 'LauncherConfig', 'LLMSwarmConfig', 'ORTConfig', 'OVConfig', 'ProcessConfig', 'PyTorchConfig', 'PyTXIConfig', 'ScenarioConfig', 'TorchORTConfig', 'TorchrunConfig', 'TrainingConfig', 'TRTLLMConfig', 'VLLMConfig', 'LlamaCppConfig'] + +# File: optimum-benchmark-main/optimum_benchmark/backends/__init__.py +from .config import BackendConfig +from .ipex.config import IPEXConfig +from .llama_cpp.config import LlamaCppConfig +from .llm_swarm.config import LLMSwarmConfig +from .neural_compressor.config import INCConfig +from .onnxruntime.config import ORTConfig +from .openvino.config import 
OVConfig +from .py_txi.config import PyTXIConfig +from .pytorch.config import PyTorchConfig +from .tensorrt_llm.config import TRTLLMConfig +from .torch_ort.config import TorchORTConfig +from .vllm.config import VLLMConfig +__all__ = ['PyTorchConfig', 'ORTConfig', 'IPEXConfig', 'OVConfig', 'TorchORTConfig', 'TRTLLMConfig', 'INCConfig', 'PyTXIConfig', 'LLMSwarmConfig', 'BackendConfig', 'VLLMConfig', 'LlamaCppConfig'] + +# File: optimum-benchmark-main/optimum_benchmark/backends/base.py +import os +from abc import ABC +from collections import OrderedDict +from logging import getLogger +from typing import Any, ClassVar, Dict, Generic, Optional +import datasets.utils.logging as datasets_logging +import transformers.utils.logging as transformers_logging +from safetensors.torch import save_file +from transformers import GenerationConfig, PretrainedConfig, PreTrainedModel, TrainerState, set_seed +from ..import_utils import is_torch_available +from .config import BackendConfigT +from .diffusers_utils import extract_diffusers_shapes_from_model, get_diffusers_automodel_loader_for_task, get_diffusers_pretrained_config +from .timm_utils import extract_timm_shapes_from_config, get_timm_automodel_loader, get_timm_pretrained_config +from .transformers_utils import PretrainedProcessor, extract_transformers_shapes_from_artifacts, get_transformers_automodel_loader_for_task, get_transformers_generation_config, get_transformers_pretrained_config, get_transformers_pretrained_processor +if is_torch_available(): + import torch +datasets_logging.set_verbosity_error() +transformers_logging.set_verbosity_error() + +class Backend(Generic[BackendConfigT], ABC): + NAME: ClassVar[str] + model_type: str + model_shapes: Dict[str, int] + pretrained_model: PreTrainedModel + pretrained_config: Optional[PretrainedConfig] + generation_config: Optional[GenerationConfig] + pretrained_processor: Optional[PretrainedProcessor] + + def __init__(self, config: BackendConfigT): + self.config = config + self.logger = getLogger(self.NAME) + self.logger.info(f'Allocating {self.NAME} backend') + self.logger.info(f'\t+ Seeding backend with {self.config.seed}') + self.seed() + if self.config.library == 'diffusers': + self.logger.info('\t+ Benchmarking a Diffusers pipeline') + self.pretrained_config = get_diffusers_pretrained_config(self.config.model, **self.config.model_kwargs) + self.model_shapes = extract_diffusers_shapes_from_model(self.config.model, **self.config.model_kwargs) + self.automodel_loader = get_diffusers_automodel_loader_for_task(self.config.task) + self.pretrained_processor = None + self.generation_config = None + elif self.config.library == 'timm': + self.logger.info('\t+ Benchmarking a Timm model') + self.pretrained_config = get_timm_pretrained_config(self.config.model) + self.model_shapes = extract_timm_shapes_from_config(self.pretrained_config) + self.automodel_loader = get_timm_automodel_loader() + self.pretrained_processor = None + self.generation_config = None + elif self.config.library == 'llama_cpp': + self.logger.info('\t+ Benchmarking a LlamaCpp model') + self.pretrained_processor = None + self.generation_config = None + self.pretrained_config = None + self.automodel_loader = None + self.model_shapes = extract_transformers_shapes_from_artifacts(self.pretrained_config, self.pretrained_processor) + else: + self.logger.info('\t+ Benchmarking a Transformers model') + self.generation_config = get_transformers_generation_config(self.config.model, **self.config.model_kwargs) + self.pretrained_config = 
get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) + self.automodel_loader = get_transformers_automodel_loader_for_task(self.config.task) + self.pretrained_processor = get_transformers_pretrained_processor(self.config.processor, **self.config.processor_kwargs) + self.model_shapes = extract_transformers_shapes_from_artifacts(self.pretrained_config, self.pretrained_processor) + + def seed(self) -> None: + set_seed(self.config.seed) + + def create_no_weights_model(self) -> None: + if self.pretrained_config is None: + raise ValueError("Can't create no weights model without a pretrained config") + self.no_weights_model = os.path.join(self.tmpdir.name, 'no_weights_model') + self.logger.info("\t+ Creating no weights model's directory") + os.makedirs(self.no_weights_model, exist_ok=True) + self.logger.info("\t+ Creating no weights model's state dict") + state_dict = torch.nn.Linear(1, 1).state_dict() + self.logger.info("\t+ Saving no weights model's safetensors") + safetensors = os.path.join(self.no_weights_model, 'model.safetensors') + save_file(tensors=state_dict, filename=safetensors, metadata={'format': 'pt'}) + self.logger.info("\t+ Saving no weights model's config") + self.pretrained_config.save_pretrained(save_directory=self.no_weights_model) + + def prepare_input_shapes(self, input_shapes: Dict[str, Any]) -> Dict[str, Any]: + return input_shapes + + def prepare_inputs(self, inputs: Dict[str, Any]) -> Dict[str, Any]: + return inputs + + def load(self) -> None: + raise NotImplementedError('Backend must implement load method') + + def forward(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> OrderedDict: + raise NotImplementedError('Backend must implement forward method') + + def prefill(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> OrderedDict: + raise NotImplementedError('Backend must implement prefill method') + + def generate(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> OrderedDict: + raise NotImplementedError('Backend must implement generate method') + + def call(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> OrderedDict: + raise NotImplementedError('Backend must implement call method') + + def train(self, **kwargs) -> TrainerState: + raise NotImplementedError('Backend must implement train method') + +# File: optimum-benchmark-main/optimum_benchmark/backends/config.py +import os +from abc import ABC +from dataclasses import dataclass, field +from logging import getLogger +from typing import Any, Dict, Optional, TypeVar +from psutil import cpu_count +from ..system_utils import get_gpu_device_ids, is_nvidia_system, is_rocm_system +from ..task_utils import infer_library_from_model_name_or_path, infer_model_type_from_model_name_or_path, infer_task_from_model_name_or_path +LOGGER = getLogger('backend') + +@dataclass +class BackendConfig(ABC): + name: str + version: str + _target_: str + task: Optional[str] = None + library: Optional[str] = None + model_type: Optional[str] = None + model: Optional[str] = None + processor: Optional[str] = None + device: Optional[str] = None + device_ids: Optional[str] = None + seed: int = 42 + inter_op_num_threads: Optional[int] = None + intra_op_num_threads: Optional[int] = None + model_kwargs: Dict[str, Any] = field(default_factory=dict) + processor_kwargs: Dict[str, Any] = field(default_factory=dict) + + def __post_init__(self): + if self.model is None: + raise ValueError('`model` must be specified.') + if self.processor is None: + self.processor = self.model + if self.library is None: + 
self.library = infer_library_from_model_name_or_path(self.model, revision=self.model_kwargs.get('revision', None), token=self.model_kwargs.get('token', None)) + if self.task is None: + self.task = infer_task_from_model_name_or_path(self.model, self.library, revision=self.model_kwargs.get('revision', None), token=self.model_kwargs.get('token', None)) + if self.model_type is None: + self.model_type = infer_model_type_from_model_name_or_path(self.model, self.library, revision=self.model_kwargs.get('revision', None), token=self.model_kwargs.get('token', None), trust_remote_code=self.model_kwargs.get('trust_remote_code', False)) + if self.device is None: + self.device = 'cuda' if is_nvidia_system() or is_rocm_system() else 'cpu' + if ':' in self.device: + LOGGER.warning('`device` was specified using PyTorch format (e.g. `cuda:0`) which is not recommended.') + self.device_ids = self.device.split(':')[1] + self.device = self.device.split(':')[0] + LOGGER.warning(f'`device` and `device_ids` are now set to `{self.device}` and `{self.device_ids}`.') + if self.device not in ['cuda', 'cpu', 'mps', 'xla', 'gpu']: + raise ValueError(f'`device` must be either `cuda`, `cpu`, `mps`, `xla` or `gpu`, but got {self.device}') + if self.device == 'cuda': + if self.device_ids is None: + LOGGER.warning('`device_ids` was not specified, using all available GPUs.') + self.device_ids = get_gpu_device_ids() + LOGGER.warning(f'`device_ids` is now set to `{self.device_ids}` based on system configuration.') + if is_nvidia_system(): + os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' + os.environ['CUDA_VISIBLE_DEVICES'] = self.device_ids + LOGGER.info(f"CUDA_VISIBLE_DEVICES was set to {os.environ['CUDA_VISIBLE_DEVICES']}.") + elif is_rocm_system(): + os.environ['ROCR_VISIBLE_DEVICES'] = self.device_ids + LOGGER.info(f"ROCR_VISIBLE_DEVICES was set to {os.environ['ROCR_VISIBLE_DEVICES']}.") + else: + raise RuntimeError('CUDA device is only supported on systems with NVIDIA or ROCm drivers.') + if self.library not in ['transformers', 'diffusers', 'timm', 'llama_cpp']: + raise ValueError(f'`library` must be either `transformers`, `diffusers`, `timm` or `llama_cpp`, but got {self.library}') + if self.inter_op_num_threads is not None: + if self.inter_op_num_threads == -1: + self.inter_op_num_threads = cpu_count() + if self.intra_op_num_threads is not None: + if self.intra_op_num_threads == -1: + self.intra_op_num_threads = cpu_count() +BackendConfigT = TypeVar('BackendConfigT', bound=BackendConfig) + +# File: optimum-benchmark-main/optimum_benchmark/backends/diffusers_utils.py +import warnings +from typing import Dict +from hydra.utils import get_class +from ..import_utils import is_diffusers_available +if is_diffusers_available(): + import diffusers + from diffusers import DiffusionPipeline + if hasattr(diffusers, 'pipelines') and hasattr(diffusers.pipelines, 'auto_pipeline'): + from diffusers.pipelines.auto_pipeline import AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, AUTO_INPAINT_PIPELINES_MAPPING, AUTO_TEXT2IMAGE_PIPELINES_MAPPING + TASKS_TO_MODEL_TYPES_TO_MODEL_CLASSES = {'inpainting': AUTO_INPAINT_PIPELINES_MAPPING.copy(), 'text-to-image': AUTO_TEXT2IMAGE_PIPELINES_MAPPING.copy(), 'image-to-image': AUTO_IMAGE2IMAGE_PIPELINES_MAPPING.copy()} + for (task_name, model_mapping) in TASKS_TO_MODEL_TYPES_TO_MODEL_CLASSES.items(): + for (model_type, model_class) in model_mapping.items(): + TASKS_TO_MODEL_TYPES_TO_MODEL_CLASSES[task_name][model_type] = model_class.__name__ + else: + TASKS_TO_MODEL_TYPES_TO_MODEL_CLASSES = {} +else: + 
TASKS_TO_MODEL_TYPES_TO_MODEL_CLASSES = {} +TASKS_TO_MODEL_LOADERS = {'inpainting': 'AutoPipelineForInpainting', 'text-to-image': 'AutoPipelineForText2Image', 'image-to-image': 'AutoPipelineForImage2Image'} + +def get_diffusers_pretrained_config(model: str, **kwargs) -> Dict[str, int]: + config = DiffusionPipeline.load_config(model, **kwargs) + pipeline_config = config[0] if isinstance(config, tuple) else config + return pipeline_config + +def extract_diffusers_shapes_from_model(model: str, **kwargs) -> Dict[str, int]: + model_config = get_diffusers_pretrained_config(model, **kwargs) + shapes = {} + if 'vae' in model_config: + vae_import_path = model_config['vae'] + vae_class = get_class(f'{vae_import_path[0]}.{vae_import_path[1]}') + vae_config = vae_class.load_config(model, subfolder='vae', **kwargs) + shapes['num_channels'] = vae_config['out_channels'] + shapes['height'] = vae_config['sample_size'] + shapes['width'] = vae_config['sample_size'] + elif 'vae_encoder' in model_config: + vae_import_path = model_config['vae_encoder'] + vae_class = get_class(f'{vae_import_path[0]}.{vae_import_path[1]}') + vae_config = vae_class.load_config(model, subfolder='vae_encoder', **kwargs) + shapes['num_channels'] = vae_config['out_channels'] + shapes['height'] = vae_config['sample_size'] + shapes['width'] = vae_config['sample_size'] + else: + warnings.warn('Could not extract shapes [num_channels, height, width] from diffusion pipeline.') + shapes['num_channels'] = -1 + shapes['height'] = -1 + shapes['width'] = -1 + return shapes + +def get_diffusers_automodel_loader_for_task(task: str): + model_loader_name = TASKS_TO_MODEL_LOADERS[task] + model_loader_class = getattr(diffusers, model_loader_name) + return model_loader_class + +# File: optimum-benchmark-main/optimum_benchmark/backends/ipex/backend.py +from collections import OrderedDict +from tempfile import TemporaryDirectory +from typing import Any, Dict +import torch +from hydra.utils import get_class +from ...import_utils import is_accelerate_available, is_torch_distributed_available +from ..base import Backend +from ..transformers_utils import fast_weights_init +from .config import IPEXConfig +from .utils import TASKS_TO_IPEXMODEL +if is_accelerate_available(): + from accelerate import Accelerator +if is_torch_distributed_available(): + import torch.distributed + +class IPEXBackend(Backend[IPEXConfig]): + NAME: str = 'ipex' + + def __init__(self, config: IPEXConfig) -> None: + super().__init__(config) + if self.config.task in TASKS_TO_IPEXMODEL: + self.ipexmodel_class = get_class(TASKS_TO_IPEXMODEL[self.config.task]) + self.logger.info(f'\t+ Using IPEXModel class {self.ipexmodel_class.__name__}') + else: + raise NotImplementedError(f'IPEXBackend does not support task {self.config.task}') + + def load(self) -> None: + self.logger.info('\t+ Creating backend temporary directory') + self.tmpdir = TemporaryDirectory() + if self.config.no_weights: + self.logger.info('\t+ Creating no weights IPEXModel') + self.create_no_weights_model() + self.logger.info('\t+ Loading no weights IPEXModel') + self._load_ipexmodel_with_no_weights() + else: + self.logger.info('\t+ Loading pretrained IPEXModel') + self._load_ipexmodel_from_pretrained() + self.tmpdir.cleanup() + + def _load_automodel_from_pretrained(self) -> None: + self.pretrained_model = self.automodel_loader.from_pretrained(self.config.model, **self.config.model_kwargs) + + def _load_automodel_with_no_weights(self) -> None: + (original_model, self.config.model) = (self.config.model, self.no_weights_model) 
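# NOTE (editorial): the "no weights" path works by temporarily pointing self.config.model
# at the locally generated no_weights_model directory (which contains only the saved
# pretrained config and a placeholder safetensors file, see Backend.create_no_weights_model),
# loading through the regular from_pretrained code path under fast_weights_init() so that
# random weight initialization is skipped, then restoring the original model id below.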
+ with fast_weights_init(): + self._load_automodel_from_pretrained() + self.logger.info('\t+ Tying model weights') + self.pretrained_model.tie_weights() + self.config.model = original_model + + def _load_ipexmodel_from_pretrained(self) -> None: + self.pretrained_model = self.ipexmodel_class.from_pretrained(self.config.model, export=self.config.export, device=self.config.device, **self.config.model_kwargs, **self.automodel_kwargs) + + def _load_ipexmodel_with_no_weights(self) -> None: + with fast_weights_init(): + (original_model, self.config.model) = (self.config.model, self.no_weights_model) + (original_export, self.config.export) = (self.config.export, True) + self.logger.info('\t+ Loading no weights IPEXModel') + self._load_ipexmodel_from_pretrained() + self.config.export = original_export + self.config.model = original_model + + @property + def automodel_kwargs(self) -> Dict[str, Any]: + kwargs = {} + if self.config.torch_dtype is not None: + kwargs['torch_dtype'] = getattr(torch, self.config.torch_dtype) + print(kwargs) + return kwargs + + @property + def is_dp_distributed(self) -> bool: + return is_torch_distributed_available() and torch.distributed.is_initialized() + + def prepare_input_shapes(self, input_shapes: Dict[str, Any]) -> Dict[str, Any]: + if self.is_dp_distributed: + if input_shapes['batch_size'] % torch.distributed.get_world_size() != 0: + raise ValueError(f"Batch size {input_shapes['batch_size']} must be divisible by data parallel world size {torch.distributed.get_world_size()}") + input_shapes['batch_size'] //= torch.distributed.get_world_size() + self.input_shapes = input_shapes + return input_shapes + + def prepare_inputs(self, inputs: Dict[str, Any]) -> Dict[str, Any]: + if self.is_dp_distributed: + with Accelerator().split_between_processes(inputs=inputs, apply_padding=False) as process_inputs: + inputs = process_inputs + return inputs + + def forward(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> OrderedDict: + return self.pretrained_model.forward(**inputs, **kwargs) + + def prefill(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> OrderedDict: + return self.pretrained_model.generate(**inputs, **kwargs) + + def generate(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> OrderedDict: + return self.pretrained_model.generate(**inputs, **kwargs) + + def call(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> OrderedDict: + return self.pretrained_model(**inputs, **kwargs) + +# File: optimum-benchmark-main/optimum_benchmark/backends/ipex/config.py +from dataclasses import dataclass +from typing import Optional +from ...import_utils import ipex_version +from ..config import BackendConfig +TORCH_DTYPES = ['bfloat16', 'float16', 'float32', 'auto'] + +@dataclass +class IPEXConfig(BackendConfig): + name: str = 'ipex' + version: Optional[str] = ipex_version() + _target_: str = 'optimum_benchmark.backends.ipex.backend.IPEXBackend' + no_weights: bool = False + torch_dtype: Optional[str] = None + export: bool = True + + def __post_init__(self): + super().__post_init__() + self.device = self.device.lower() + if self.device not in ['cpu', 'gpu']: + raise ValueError(f'IPEXBackend only supports CPU devices, got {self.device}') + if self.model_kwargs.get('torch_dtype', None) is not None: + raise ValueError('`torch_dtype` is an explicit argument in the PyTorch backend config. 
Please remove it from the `model_kwargs` and set it in the backend config directly.') + if self.torch_dtype is not None and self.torch_dtype not in TORCH_DTYPES: + raise ValueError(f'`torch_dtype` must be one of {TORCH_DTYPES}. Got {self.torch_dtype} instead.') + +# File: optimum-benchmark-main/optimum_benchmark/backends/llama_cpp/backend.py +from tempfile import TemporaryDirectory +from typing import Any, Dict +from llama_cpp import Llama +from ..base import Backend +from .config import LlamaCppConfig + +class LlamaCppBackend(Backend[LlamaCppConfig]): + NAME: str = 'llama_cpp' + pretrained_model: Llama + + def __init__(self, config: LlamaCppConfig) -> None: + super().__init__(config) + + def load(self) -> None: + self.logger.info('\t+ Creating backend temporary directory') + self.tmpdir = TemporaryDirectory() + self.logger.info('\t+ Loading pretrained model') + self.load_model_from_pretrained() + self.tmpdir.cleanup() + + def load_model_from_pretrained(self) -> None: + self.pretrained_model = Llama.from_pretrained(repo_id=self.config.model, filename=self.config.filename, **self.llama_cpp_kwargs) + + @property + def llama_cpp_kwargs(self) -> Dict[str, Any]: + return {'embedding': self.config.task == 'feature-extraction', 'verbose': False, 'echo': False} + + def prepare_input_shapes(self, input_shapes: Dict[str, Any]) -> Dict[str, Any]: + if self.config.task == 'text-generation': + if input_shapes['batch_size'] != 1: + raise ValueError('Batch size must be 1 for LlamaCpp text generation') + return input_shapes + + def prepare_inputs(self, inputs: Dict[str, Any]) -> Dict[str, Any]: + if self.config.task == 'text-generation': + return {'tokens': inputs['input_ids'].squeeze(0).tolist()} + elif self.config.task == 'feature-extraction': + return {'input': [self.pretrained_model.detokenize(x).decode('utf-8') for x in inputs['input_ids']]} + raise ValueError(f'Task {self.config.task} not supported by {self.NAME}') + + def forward(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> Any: + self.pretrained_model.embed(**inputs) + + def prefill(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> list[int]: + next(self.pretrained_model.generate(**inputs)) + + def generate(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> list[int]: + generator = self.pretrained_model.generate(**inputs) + for _ in range(kwargs['max_new_tokens']): + next(generator) + +# File: optimum-benchmark-main/optimum_benchmark/backends/llama_cpp/config.py +from dataclasses import dataclass +from typing import Optional +from ...import_utils import llama_cpp_version +from ..config import BackendConfig + +@dataclass +class LlamaCppConfig(BackendConfig): + name: str = 'llama_cpp' + version: Optional[str] = llama_cpp_version() + _target_: str = 'optimum_benchmark.backends.llama_cpp.backend.LlamaCppBackend' + no_weights: bool = False + filename: Optional[str] = None + + def __post_init__(self): + self.library = 'llama_cpp' + self.model_type = 'llama_cpp' + super().__post_init__() + if self.task not in ['feature-extraction', 'text-generation']: + raise NotImplementedError(f'Task {self.task} is not supported by LlamaCpp backend.') + if self.no_weights: + raise NotImplementedError('`no_weights` benchmarking is not supported by LlamaCpp backend.') + +# File: optimum-benchmark-main/optimum_benchmark/backends/llm_swarm/backend.py +import asyncio +from typing import Any, Dict, List +import torch +from huggingface_hub import AsyncInferenceClient +from llm_swarm import LLMSwarm +from llm_swarm import LLMSwarmConfig as LLMSwarmCfg 
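# NOTE (editorial): LLMSwarmCfg above is the config class of the external llm_swarm
# package (instantiated in load_model_from_pretrained); the alias keeps it distinct from
# this backend's own LLMSwarmConfig dataclass, imported from .config just below.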
+from ...task_utils import TEXT_GENERATION_TASKS +from ..base import Backend +from .config import LLMSwarmConfig + +class LLMSwarmBackend(Backend[LLMSwarmConfig]): + NAME: str = 'llm-swarm' + + def __init__(self, config: LLMSwarmConfig) -> None: + super().__init__(config) + if self.config.task not in TEXT_GENERATION_TASKS: + raise NotImplementedError(f'LLM Swarm does not support task {self.config.task}') + + def load(self) -> None: + self.logger.info('\t+ Downloading pretrained model') + self.download_pretrained_model() + self.logger.info('\t+ Preparing generation config') + self.prepare_generation_config() + self.logger.info('\t+ Loading pretrained model') + self.load_model_from_pretrained() + + def load_model_from_pretrained(self) -> None: + self.llm_swarm_config = LLMSwarmCfg(gpus=self.config.gpus, model=self.config.model, instances=self.config.instances, inference_engine=self.config.inference_engine, slurm_template_path=self.config.slurm_template_path, load_balancer_template_path=self.config.load_balancer_template_path, per_instance_max_parallel_requests=self.config.per_instance_max_parallel_requests, revision=self.config.model_kwargs.get('revision', 'main'), debug_endpoint=self.config.debug_endpoint) + self.llm_swarm = LLMSwarm(self.llm_swarm_config).__enter__() + self.client = AsyncInferenceClient(self.llm_swarm.endpoint) + + def download_pretrained_model(self) -> None: + with torch.device('meta'): + self.automodel_loader.from_pretrained(self.config.model, **self.config.model_kwargs) + + def prepare_generation_config(self) -> None: + self.generation_config.eos_token_id = -100 + self.generation_config.pad_token_id = -100 + model_cache_folder = f'models/{self.config.model}'.replace('/', '--') + model_cache_path = f'{self.config.volume}/{model_cache_folder}' + snapshot_file = f"{model_cache_path}/refs/{self.config.model_kwargs.get('revision', 'main')}" + snapshot_ref = open(snapshot_file, 'r').read().strip() + model_snapshot_path = f'{model_cache_path}/snapshots/{snapshot_ref}' + self.logger.info('\t+ Saving new pretrained generation config') + self.generation_config.save_pretrained(save_directory=model_snapshot_path) + + def prepare_inputs(self, inputs: Dict[str, Any]) -> Dict[str, Any]: + if 'inputs' in inputs: + inputs = {'prompt': self.pretrained_processor.batch_decode(inputs['inputs'].tolist())} + elif 'input_ids' in inputs: + inputs = {'prompt': self.pretrained_processor.batch_decode(inputs['input_ids'].tolist())} + else: + raise ValueError('inputs must contain either input_ids or inputs') + return inputs + + async def single_client_call(self, prompt: str, kwargs: Dict[str, Any]) -> str: + return await self.client.text_generation(prompt, max_new_tokens=kwargs.get('max_new_tokens', 1)) + + async def batch_client_call(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> List[str]: + return await asyncio.gather(*(self.single_client_call(p, kwargs) for p in inputs['prompt'])) + + def forward(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> List[str]: + return asyncio.run(self.batch_client_call(inputs, kwargs)) + + def prefill(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> List[str]: + return asyncio.run(self.batch_client_call(inputs, kwargs)) + + def generate(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> List[str]: + return asyncio.run(self.batch_client_call(inputs, kwargs)) + +# File: optimum-benchmark-main/optimum_benchmark/backends/llm_swarm/config.py +from dataclasses import dataclass +from typing import Optional +from ...import_utils import 
llm_swarm_version +from ..config import BackendConfig + +@dataclass +class LLMSwarmConfig(BackendConfig): + name: str = 'llm-swarm' + version: Optional[str] = llm_swarm_version() + _target_: str = 'optimum_benchmark.backends.llm_swarm.backend.LLMSwarmBackend' + no_weights: bool = False + gpus: int = 8 + instances: int = 1 + inference_engine: str = 'tgi' + volume: str = '/fsx/ilyas/.cache' + per_instance_max_parallel_requests: int = 500 + slurm_template_path: str = '/fsx/ilyas/swarm-templates/tgi_h100.template.slurm' + load_balancer_template_path: str = '/fsx/ilyas/swarm-templates/nginx.template.conf' + debug_endpoint: Optional[str] = None + + def __post_init__(self): + super().__post_init__() + self.hub_kwargs['cache_dir'] = self.volume + +# File: optimum-benchmark-main/optimum_benchmark/backends/neural_compressor/backend.py +import os +from collections import OrderedDict +from tempfile import TemporaryDirectory +from typing import Any, Dict +import torch +from hydra.utils import get_class +from neural_compressor.config import AccuracyCriterion, PostTrainingQuantConfig, TuningCriterion +from optimum.intel.neural_compressor.quantization import INCQuantizer +from ...generators.dataset_generator import DatasetGenerator +from ..base import Backend +from ..transformers_utils import fast_weights_init +from .config import INCConfig +from .utils import TASKS_TO_INCMODELS + +class INCBackend(Backend[INCConfig]): + NAME: str = 'neural-compressor' + + def __init__(self, config: INCConfig): + super().__init__(config) + if self.config.task in TASKS_TO_INCMODELS: + self.incmodel_class = get_class(TASKS_TO_INCMODELS[self.config.task]) + self.logger.info(f'Using INCModel class {self.incmodel_class.__name__}') + else: + raise NotImplementedError(f'INCBackend does not support task {self.config.task}') + + def load(self) -> None: + self.logger.info('\t+ Creating backend temporary directory') + self.tmpdir = TemporaryDirectory() + if self.config.ptq_quantization: + if self.config.no_weights: + self.logger.info('\t+ Creating no weights AutoModel') + self.create_no_weights_model() + self.logger.info('\t+ Loading no weights AutoModel') + self.load_automodel_with_no_weights() + else: + self.logger.info('\t+ Loading pretrained AutoModel') + self.load_automodel_from_pretrained() + self.logger.info('\t+ Applying post-training quantization') + self.quantize_automodel() + self.logger.info('\t+ Loading quantized INCModel') + (original_model, self.config.model) = (self.config.model, self.quantized_model) + self.load_incmodel_from_pretrained() + self.config.model = original_model + elif self.config.no_weights: + self.logger.info('\t+ Creating no weights INCModel') + self.create_no_weights_model() + self.logger.info('\t+ Loading no weights INCModel') + self.load_incmodel_with_no_weights() + else: + self.logger.info('\t+ Loading pretrained INCModel') + self.load_incmodel_from_pretrained() + self.tmpdir.cleanup() + + def load_automodel_from_pretrained(self) -> None: + self.pretrained_model = self.automodel_loader.from_pretrained(self.config.model, **self.config.model_kwargs) + + def load_automodel_with_no_weights(self) -> None: + (original_model, self.config.model) = (self.config.model, self.no_weights_model) + with fast_weights_init(): + self.load_automodel_from_pretrained() + self.logger.info('\t+ Tying model weights') + self.pretrained_model.tie_weights() + self.config.model = original_model + + def load_incmodel_from_pretrained(self) -> None: + self.pretrained_model = 
self.incmodel_class.from_pretrained(self.config.model, **self.config.model_kwargs) + + def load_incmodel_with_no_weights(self) -> None: + (original_model, self.config.model) = (self.config.model, self.no_weights_model) + with fast_weights_init(): + self.load_incmodel_from_pretrained() + self.logger.info('\t+ Tying model weights') + self.pretrained_model.model.tie_weights() + self.config.model = original_model + + def create_no_weights_model(self) -> None: + self.no_weights_model = os.path.join(self.tmpdir.name, 'no_weights_model') + self.logger.info('\t+ Creating no weights model directory') + os.makedirs(self.no_weights_model, exist_ok=True) + self.logger.info('\t+ Creating no weights model state dict') + state_dict = torch.nn.Linear(1, 1).state_dict() + self.logger.info('\t+ Saving no weights model pytorch_model.bin') + torch.save(state_dict, os.path.join(self.no_weights_model, 'pytorch_model.bin')) + self.logger.info('\t+ Saving no weights model pretrained config') + self.pretrained_config.save_pretrained(save_directory=self.no_weights_model) + + def quantize_automodel(self) -> None: + self.quantized_model = f'{self.tmpdir.name}/quantized_model' + self.logger.info('\t+ Processing quantization config') + ptq_quantization_config = self.config.ptq_quantization_config.copy() + ptq_quantization_config['accuracy_criterion'] = AccuracyCriterion(**ptq_quantization_config['accuracy_criterion']) + ptq_quantization_config['tuning_criterion'] = TuningCriterion(**ptq_quantization_config['tuning_criterion']) + ptq_quantization_config = PostTrainingQuantConfig(**ptq_quantization_config) + self.logger.info('\t+ Creating quantizer') + quantizer = INCQuantizer.from_pretrained(model=self.pretrained_model, task=self.config.task, seed=self.config.seed, calibration_fn=None, eval_fn=None) + if self.config.calibration: + self.logger.info('\t+ Generating calibration dataset') + dataset_shapes = {'dataset_size': 1, 'sequence_length': 1, **self.model_shapes} + calibration_dataset = DatasetGenerator(task=self.config.task, dataset_shapes=dataset_shapes, model_shapes=self.model_shapes)() + columns_to_be_removed = list(set(calibration_dataset.column_names) - set(quantizer._signature_columns)) + calibration_dataset = calibration_dataset.remove_columns(columns_to_be_removed) + else: + calibration_dataset = None + self.logger.info('\t+ Quantizing model') + quantizer.quantize(save_directory=self.quantized_model, calibration_dataset=calibration_dataset, quantization_config=ptq_quantization_config, remove_unused_columns=True, data_collator=None, file_name=None, batch_size=1) + + @torch.inference_mode() + def forward(self, input: Dict[str, Any], kwargs: Dict[str, Any]) -> OrderedDict: + return self.pretrained_model(**input, **kwargs) + + @torch.inference_mode() + def prefill(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> OrderedDict: + return self.pretrained_model.generate(**inputs, **kwargs) + + @torch.inference_mode() + def generate(self, input: Dict[str, Any], kwargs: Dict[str, Any]) -> OrderedDict: + return self.pretrained_model.generate(**input, **kwargs) + +# File: optimum-benchmark-main/optimum_benchmark/backends/neural_compressor/config.py +from dataclasses import dataclass, field +from typing import Any, Dict, Optional +from omegaconf import OmegaConf +from ...import_utils import neural_compressor_version +from ..config import BackendConfig +ACCURACY_CRITERION_CONFIG = {'higher_is_better': True, 'criterion': 'relative', 'tolerable_loss': 0.01} +TUNING_CRITERION_CONFIG = {'strategy': 'basic', 
'strategy_kwargs': None, 'timeout': 0, 'max_trials': 100, 'objective': 'performance'} +PTQ_QUANTIZATION_CONFIG = {'device': 'cpu', 'backend': 'default', 'domain': 'auto', 'recipes': {}, 'quant_format': 'default', 'inputs': [], 'outputs': [], 'approach': 'static', 'calibration_sampling_size': [100], 'op_type_dict': None, 'op_name_dict': None, 'reduce_range': None, 'example_inputs': None, 'excluded_precisions': [], 'quant_level': 'auto', 'accuracy_criterion': ACCURACY_CRITERION_CONFIG, 'tuning_criterion': TUNING_CRITERION_CONFIG} + +@dataclass +class INCConfig(BackendConfig): + name: str = 'neural-compressor' + version: Optional[str] = neural_compressor_version() + _target_: str = 'optimum_benchmark.backends.neural_compressor.backend.INCBackend' + no_weights: bool = False + ptq_quantization: bool = False + ptq_quantization_config: Dict[str, Any] = field(default_factory=dict) + calibration: bool = False + calibration_config: Dict[str, Any] = field(default_factory=dict) + + def __post_init__(self): + super().__post_init__() + if self.device != 'cpu': + raise ValueError(f'INCBackend only supports CPU devices, got {self.device}') + if self.ptq_quantization: + self.ptq_quantization_config = OmegaConf.to_object(OmegaConf.merge(PTQ_QUANTIZATION_CONFIG, self.ptq_quantization_config)) + if self.ptq_quantization_config['approach'] == 'static' and (not self.calibration): + raise ValueError('Calibration must be enabled when using static quantization.') + +# File: optimum-benchmark-main/optimum_benchmark/backends/onnxruntime/backend.py +import os +from collections import OrderedDict +from tempfile import TemporaryDirectory +from typing import Any, Dict +import torch +from hydra.utils import get_class +from onnxruntime import SessionOptions +from optimum.onnxruntime import ONNX_DECODER_NAME, ONNX_DECODER_WITH_PAST_NAME, ORTOptimizer, ORTQuantizer +from optimum.onnxruntime.configuration import AutoCalibrationConfig, AutoOptimizationConfig, AutoQuantizationConfig, CalibrationConfig, OptimizationConfig, QuantizationConfig +from ...generators.dataset_generator import DatasetGenerator +from ...import_utils import is_accelerate_available, is_torch_distributed_available +from ...task_utils import TEXT_GENERATION_TASKS +from ..base import Backend +from ..transformers_utils import fast_weights_init +from .config import ORTConfig +from .utils import TASKS_TO_MODEL_TYPES_TO_ORTPIPELINES, TASKS_TO_ORTMODELS, format_calibration_config, format_quantization_config +if is_accelerate_available(): + from accelerate import Accelerator +if is_torch_distributed_available(): + import torch.distributed + +class ORTBackend(Backend[ORTConfig]): + NAME: str = 'onnxruntime' + + def __init__(self, config: ORTConfig) -> None: + super().__init__(config) + if self.config.task in TASKS_TO_ORTMODELS: + self.ort_model_loader = get_class(TASKS_TO_ORTMODELS[self.config.task]) + self.logger.info(f'Using ORT Model class {self.ort_model_loader.__name__}') + elif self.config.task in TASKS_TO_MODEL_TYPES_TO_ORTPIPELINES: + if self.config.model_type in TASKS_TO_MODEL_TYPES_TO_ORTPIPELINES[self.config.task]: + self.ort_model_loader = get_class(TASKS_TO_MODEL_TYPES_TO_ORTPIPELINES[self.config.task][self.config.model_type]) + self.logger.info(f'Using ORT Pipeline class {self.ort_model_loader.__name__}') + else: + raise NotImplementedError(f'ORTBackend does not support model {self.config.model_type} for task {self.config.task}') + else: + raise NotImplementedError(f'ORTBackend does not support task {self.config.task}') + self.session_options = 
SessionOptions() + if self.config.session_options: + self.logger.info('\t+ Processing session options') + for (key, value) in self.config.session_options.items(): + setattr(self.session_options, key, value) + + def validate_execution_provider(self) -> None: + if not self.pretrained_model.providers[0] == self.config.provider: + raise ValueError(f'{self.config.provider} is not first in providers list: {self.pretrained_model.providers}') + + def load(self) -> None: + self.logger.info('\t+ Creating backend temporary directory') + self.tmpdir = TemporaryDirectory() + if self.config.no_weights: + self.logger.info('\t+ Creating no weights ORTModel') + self.create_no_weights_model() + self.logger.info('\t+ Loading no weights ORTModel') + self.load_ortmodel_with_no_weights() + else: + self.logger.info('\t+ Loading pretrained ORTModel') + self.load_ortmodel_from_pretrained() + if self.is_optimized or self.is_quantized: + (original_model, self.config.model) = (self.config.model, self.pretrained_model.model_save_dir) + if self.is_optimized: + self.logger.info('\t+ Applying ORT optimization') + self.optimize_onnx_files() + self.config.model = self.optimized_model + if self.is_quantized: + self.logger.info('\t+ Applying ORT quantization') + self.quantize_onnx_files() + self.config.model = self.quantized_model + if self.is_optimized or self.is_quantized: + (original_export, self.config.export) = (self.config.export, False) + self.logger.info('\t+ Loading optimized/quantized model') + self.load_ortmodel_from_pretrained() + self.config.export = original_export + self.config.model = original_model + self.logger.info('\t+ Validating requested Execution Provider') + self.validate_execution_provider() + self.logger.info('\t+ Cleaning up backend temporary directory') + self.tmpdir.cleanup() + + def load_ortmodel_from_pretrained(self) -> None: + self.pretrained_model = self.ort_model_loader.from_pretrained(self.config.model, export=self.config.export, session_options=self.session_options, provider_options=self.config.provider_options, use_io_binding=self.config.use_io_binding, provider=self.config.provider, **self.config.model_kwargs, **self.ortmodel_kwargs) + + def load_ortmodel_with_no_weights(self) -> None: + (original_model, self.config.model) = (self.config.model, self.no_weights_model) + with fast_weights_init(): + self.load_ortmodel_from_pretrained() + self.config.model = original_model + + @property + def is_optimized(self) -> bool: + return self.config.auto_optimization is not None or self.config.optimization + + @property + def is_quantized(self) -> bool: + return self.config.auto_quantization is not None or self.config.quantization + + @property + def is_calibrated(self) -> bool: + return self.config.auto_calibration is not None or self.config.calibration + + @property + def is_dp_distributed(self) -> bool: + return is_torch_distributed_available() and torch.distributed.is_initialized() + + @property + def ortmodel_kwargs(self) -> Dict[str, Any]: + kwargs = {} + if self.config.task in TEXT_GENERATION_TASKS: + kwargs['use_cache'] = self.config.use_cache + kwargs['use_merged'] = self.config.use_merged + return kwargs + + @property + def onnx_files_names(self): + assert os.path.isdir(self.config.model), f'{self.config.model} is not a directory' + if self.config.use_merged: + return [model for model in os.listdir(self.config.model) if model not in [ONNX_DECODER_NAME, ONNX_DECODER_WITH_PAST_NAME] and model.endswith('.onnx')] + else: + return [file for file in os.listdir(self.config.model) if 
file.endswith('.onnx')] + + def optimize_onnx_files(self) -> None: + self.logger.info('\t+ Attempting optimization') + self.optimized_model = os.path.join(self.tmpdir.name, 'optimized') + self.logger.info('\t+ Processing optimization config') + if self.config.auto_optimization is not None: + optimization_config = AutoOptimizationConfig.with_optimization_level(optimization_level=self.config.auto_optimization, for_gpu=self.config.device == 'cuda', **self.config.auto_optimization_config) + elif self.config.optimization: + optimization_config = OptimizationConfig(optimize_for_gpu=self.config.device == 'cuda', **self.config.optimization_config) + self.logger.info('\t+ Creating optimizer') + optimizer = ORTOptimizer.from_pretrained(self.config.model, file_names=self.onnx_files_names) + self.logger.info('\t+ Optimizing ORTModel') + optimizer.optimize(optimization_config, save_dir=self.optimized_model, use_external_data_format=None, one_external_file=True, file_suffix='') + if self.pretrained_processor is not None: + self.pretrained_processor.save_pretrained(self.optimized_model) + if self.pretrained_config is not None: + self.pretrained_config.save_pretrained(self.optimized_model) + + def quantize_onnx_files(self) -> None: + self.logger.info('\t+ Attempting quantization') + self.quantized_model = f'{self.tmpdir.name}/quantized_model' + if self.is_calibrated and len(self.onnx_files_names) > 1: + raise NotImplementedError(f'Calibrated/Static Quantization is not supported for models with multiple components. Found {len(self.onnx_files_names)} components.') + self.logger.info('\t+ Processing quantization config') + if self.config.auto_quantization is not None: + auto_quantization_config = format_quantization_config(self.config.auto_quantization_config) + auto_quantization_class = getattr(AutoQuantizationConfig, self.config.auto_quantization) + quantization_config = auto_quantization_class(**auto_quantization_config) + elif self.config.quantization: + quantization_config = format_quantization_config(self.config.quantization_config) + quantization_config = QuantizationConfig(**quantization_config) + if self.is_calibrated: + self.logger.info('\t+ Generating calibration dataset') + dataset_shapes = {'dataset_size': 1, 'sequence_length': 1, **self.model_shapes} + calibration_dataset = DatasetGenerator(task=self.config.task, dataset_shapes=dataset_shapes, model_shapes=self.model_shapes)() + columns_to_be_removed = list(set(calibration_dataset.column_names) - set(self.pretrained_model.input_names)) + calibration_dataset = calibration_dataset.remove_columns(columns_to_be_removed) + self.logger.info('\t+ Processing calibration config') + if self.config.auto_calibration is not None: + self.logger.info('\t+ Processing calibration config') + auto_calibration_method = getattr(AutoCalibrationConfig, self.config.auto_calibration) + calibration_config = auto_calibration_method(calibration_dataset, **self.config.auto_calibration_config) + elif self.config.calibration: + self.logger.info('\t+ Processing calibration config') + calibration_config = format_calibration_config(self.config.calibration_config) + calibration_config = CalibrationConfig(dataset_name='calibration_dataset', dataset_split=calibration_dataset.split, dataset_num_samples=calibration_dataset.num_rows, dataset_config_name=calibration_dataset.config_name, **self.config.calibration_config) + for onnx_file_name in self.onnx_files_names: + self.logger.info(f'\t+ Creating quantizer for {onnx_file_name}') + quantizer = 
ORTQuantizer.from_pretrained(self.config.model, file_name=onnx_file_name) + if self.is_calibrated: + self.logger.info('\t+ Fitting calibration tensors range') + calibration_tensors_range = quantizer.fit(dataset=calibration_dataset, use_gpu=self.config.device == 'cuda', calibration_config=calibration_config, operators_to_quantize=quantization_config.operators_to_quantize, use_external_data_format=False, force_symmetric_range=False, batch_size=1) + else: + calibration_tensors_range = None + self.logger.info('\t+ Quantizing model') + quantizer.quantize(save_dir=self.quantized_model, quantization_config=quantization_config, calibration_tensors_range=calibration_tensors_range, use_external_data_format=False, preprocessor=None, file_suffix='') + if self.pretrained_processor is not None: + self.pretrained_processor.save_pretrained(self.quantized_model) + if self.pretrained_config is not None: + self.pretrained_config.save_pretrained(self.quantized_model) + + def prepare_input_shapes(self, input_shapes: Dict[str, Any]) -> Dict[str, Any]: + if self.is_dp_distributed: + if input_shapes['batch_size'] % torch.distributed.get_world_size() != 0: + raise ValueError(f"Batch size {input_shapes['batch_size']} must be divisible by data parallel world size {torch.distributed.get_world_size()}") + input_shapes['batch_size'] //= torch.distributed.get_world_size() + return input_shapes + + def prepare_inputs(self, inputs: Dict[str, Any]) -> Dict[str, Any]: + if self.is_dp_distributed: + with Accelerator().split_between_processes(inputs=inputs, apply_padding=False) as process_inputs: + inputs = process_inputs + if self.config.library == 'transformers': + for (key, value) in list(inputs.items()): + if key in ['position_ids', 'token_type_ids']: + if key not in self.pretrained_model.input_names: + inputs.pop(key) + for (key, value) in inputs.items(): + if isinstance(value, torch.Tensor): + inputs[key] = value.to(self.config.device) + return inputs + + @torch.inference_mode() + def forward(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> OrderedDict: + return self.pretrained_model.forward(**inputs, **kwargs) + + @torch.inference_mode() + def prefill(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> OrderedDict: + return self.pretrained_model.generate(**inputs, **kwargs) + + @torch.inference_mode() + def generate(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> OrderedDict: + return self.pretrained_model.generate(**inputs, **kwargs) + + @torch.inference_mode() + def call(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> OrderedDict: + return self.pretrained_model(**inputs, **kwargs) + +# File: optimum-benchmark-main/optimum_benchmark/backends/onnxruntime/config.py +from dataclasses import dataclass, field +from typing import Any, Dict, Optional +from ...import_utils import onnxruntime_version +from ...task_utils import TEXT_GENERATION_TASKS +from ..config import BackendConfig +QUANTIZATION_CONFIG = {'is_static': False, 'format': 'QOperator'} +CALIBRATION_CONFIG = {'method': 'MinMax'} +AUTO_QUANTIZATION_CONFIG = {'is_static': False} +IO_BINDING_LIBRARIES = ['transformers', 'timm'] +IO_BINDING_PROVIDERS = ['CPUExecutionProvider', 'CUDAExecutionProvider'] +DEVICE_PROVIDER_MAP = {'cpu': 'CPUExecutionProvider', 'cuda': 'CUDAExecutionProvider'} + +@dataclass +class ORTConfig(BackendConfig): + name: str = 'onnxruntime' + version: Optional[str] = onnxruntime_version() + _target_: str = 'optimum_benchmark.backends.onnxruntime.backend.ORTBackend' + no_weights: bool = False + export: bool = True + 
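+ # The fields below are consumed by ORTBackend: use_cache/use_merged are forwarded to the ORTModel loader for text-generation tasks,
+ # provider/provider_options/session_options/use_io_binding configure ONNX Runtime, and the auto_*/optimization/quantization/calibration
+ # blocks feed the optimum ORTOptimizer and ORTQuantizer helpers.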
use_cache: bool = True + use_merged: bool = False + torch_dtype: Optional[str] = None + provider: Optional[str] = None + provider_options: Dict[str, Any] = field(default_factory=dict) + use_io_binding: Optional[bool] = None + session_options: Dict[str, Any] = field(default_factory=dict) + auto_optimization: Optional[str] = None + auto_optimization_config: Dict[str, Any] = field(default_factory=dict) + auto_quantization: Optional[str] = None + auto_quantization_config: Dict[str, Any] = field(default_factory=dict) + auto_calibration: Optional[str] = None + auto_calibration_config: Dict[str, Any] = field(default_factory=dict) + optimization: bool = False + optimization_config: Dict[str, Any] = field(default_factory=dict) + quantization: bool = False + quantization_config: Dict[str, Any] = field(default_factory=dict) + calibration: bool = False + calibration_config: Dict[str, Any] = field(default_factory=dict) + + def __post_init__(self): + super().__post_init__() + if self.device not in ['cpu', 'cuda']: + raise ValueError(f'ORTBackend only supports CPU and CUDA devices, got {self.device}') + if not self.no_weights and (not self.export) and (self.torch_dtype is not None): + raise NotImplementedError("Can't convert an exported model's weights to a different dtype.") + if self.provider is None: + self.provider = DEVICE_PROVIDER_MAP[self.device] + if self.use_io_binding is None: + self.use_io_binding = self.provider in IO_BINDING_PROVIDERS and self.library in IO_BINDING_LIBRARIES + if self.provider == 'TensorrtExecutionProvider' and self.task in TEXT_GENERATION_TASKS: + raise NotImplementedError("we don't support TensorRT for text generation tasks") + if self.quantization: + self.quantization_config = {**QUANTIZATION_CONFIG, **self.quantization_config} + if self.quantization_config['is_static'] and self.auto_calibration is None and (not self.calibration): + raise ValueError('Quantization is static but calibration is not enabled. Please enable calibration or disable static quantization.') + if self.auto_quantization is not None: + self.auto_quantization_config = {**AUTO_QUANTIZATION_CONFIG, **self.auto_quantization_config} + if self.auto_quantization_config['is_static'] and self.auto_calibration is None and (not self.calibration): + raise ValueError('Quantization is static but calibration is not enabled. 
Please enable calibration or disable static quantization.') + if self.calibration: + self.calibration_config = {**CALIBRATION_CONFIG, **self.calibration_config} + +# File: optimum-benchmark-main/optimum_benchmark/backends/onnxruntime/utils.py +from typing import Any, Dict +from onnxruntime.quantization import CalibrationMethod, QuantFormat, QuantizationMode, QuantType +from optimum.pipelines import ORT_SUPPORTED_TASKS +TASKS_TO_ORTMODELS = {task: f"optimum.onnxruntime.{task_dict['class'][0].__name__}" for (task, task_dict) in ORT_SUPPORTED_TASKS.items()} +TASKS_TO_MODEL_TYPES_TO_ORTPIPELINES = {'text-to-image': {'stable-diffusion': 'optimum.onnxruntime.ORTStableDiffusionPipeline', 'stable-diffusion-xl': 'optimum.onnxruntime.ORTStableDiffusionXLPipeline', 'latent-consistency': 'optimum.onnxruntime.ORTLatentConsistencyModelPipeline'}, 'image-to-image': {'stable-diffusion': 'optimum.onnxruntime.ORTStableDiffusionImg2ImgPipeline', 'stable-diffusion-xl': 'optimum.onnxruntime.ORTStableDiffusionImg2ImgXLPipeline'}, 'inpainting': {'stable-diffusion': 'optimum.onnxruntime.ORTStableDiffusionInpaintingPipeline'}} + +def format_calibration_config(calibration_config: Dict[str, Any]) -> None: + if calibration_config.get('method', None) is not None: + calibration_config['method'] = CalibrationMethod[calibration_config['method']] + return calibration_config + +def format_quantization_config(quantization_config: Dict[str, Any]) -> None: + if quantization_config.get('format', None) is not None: + quantization_config['format'] = QuantFormat.from_string(quantization_config['format']) + if quantization_config.get('mode', None) is not None: + quantization_config['mode'] = QuantizationMode.from_string(quantization_config['mode']) + if quantization_config.get('activations_dtype', None) is not None: + quantization_config['activations_dtype'] = QuantType.from_string(quantization_config['activations_dtype']) + if quantization_config.get('weights_dtype', None) is not None: + quantization_config['weights_dtype'] = QuantType.from_string(quantization_config['weights_dtype']) + return quantization_config + +# File: optimum-benchmark-main/optimum_benchmark/backends/openvino/backend.py +import inspect +from collections import OrderedDict +from tempfile import TemporaryDirectory +from typing import Any, Dict +import torch +from hydra.utils import get_class +from openvino.runtime import properties +from optimum.intel.openvino import OVConfig as OVQuantizationConfig +from optimum.intel.openvino import OVQuantizer +from ...generators.dataset_generator import DatasetGenerator +from ...import_utils import is_accelerate_available, is_torch_distributed_available +from ...task_utils import TEXT_GENERATION_TASKS +from ..base import Backend +from ..transformers_utils import fast_weights_init +from .config import OVConfig +from .utils import TASKS_TO_MODEL_TYPES_TO_OVPIPELINE, TASKS_TO_OVMODEL +if is_accelerate_available(): + from accelerate import Accelerator +if is_torch_distributed_available(): + import torch.distributed + +class OVBackend(Backend[OVConfig]): + NAME: str = 'openvino' + + def __init__(self, config: OVConfig) -> None: + super().__init__(config) + if self.config.task in TASKS_TO_OVMODEL: + self.ovmodel_class = get_class(TASKS_TO_OVMODEL[self.config.task]) + self.logger.info(f'\t+ Using OVModel class {self.ovmodel_class.__name__}') + elif self.config.task in TASKS_TO_MODEL_TYPES_TO_OVPIPELINE: + if self.config.model_type in TASKS_TO_MODEL_TYPES_TO_OVPIPELINE[self.config.task]: + self.ovmodel_class = 
get_class(TASKS_TO_MODEL_TYPES_TO_OVPIPELINE[self.config.task][self.config.model_type]) + self.logger.info(f'\t+ Using OVPipeline class {self.ovmodel_class.__name__}') + else: + raise NotImplementedError(f'OVBackend does not support model {self.config.model_type} for task {self.config.task}') + else: + raise NotImplementedError(f'OVBackend does not support task {self.config.task}') + if self.config.inter_op_num_threads is not None: + self.logger.info(f'\t+ Setting inter_op_num_threads to {self.config.inter_op_num_threads}') + self.config.openvino_config[properties.inference_num_threads()] = self.config.inter_op_num_threads + + def load(self) -> None: + self.logger.info('\t+ Creating backend temporary directory') + self.tmpdir = TemporaryDirectory() + if self.config.quantization: + if self.config.no_weights: + self.logger.info('\t+ Creating no weights AutoModel') + self.create_no_weights_model() + self.logger.info('\t+ Loading no weights AutoModel') + self._load_automodel_with_no_weights() + else: + self.logger.info('\t+ Loading pretrained AutoModel') + self._load_automodel_from_pretrained() + self.logger.info('\t+ Applying post-training quantization') + self.quantize_automodel() + (original_model, self.config.model) = (self.config.model, self.quantized_model) + (original_export, self.config.export) = (self.config.export, False) + self.logger.info('\t+ Loading quantized OVModel') + self._load_ovmodel_from_pretrained() + (self.config.model, self.config.export) = (original_model, original_export) + elif self.config.no_weights: + self.logger.info('\t+ Creating no weights OVModel') + self.create_no_weights_model() + self.logger.info('\t+ Loading no weights OVModel') + self._load_ovmodel_with_no_weights() + else: + self.logger.info('\t+ Loading pretrained OVModel') + self._load_ovmodel_from_pretrained() + if self.config.reshape: + static_shapes = {key: value for (key, value) in {**self.input_shapes, **self.model_shapes}.items() if key in inspect.getfullargspec(self.pretrained_model.reshape).args} + if 'sequence_length' in static_shapes and 'height' in static_shapes and ('width' in static_shapes): + static_shapes['sequence_length'] = self.model_shapes.get('num_channels') + self.logger.info(f'\t+ Reshaping model with static shapes: {static_shapes}') + self.pretrained_model.reshape(**static_shapes) + if self.config.half: + self.logger.info('\t+ Converting model to half precision') + self.pretrained_model.half() + if self.config.reshape or self.config.half: + self.logger.info('\t+ Compiling model') + self.pretrained_model.compile() + self.tmpdir.cleanup() + + def _load_automodel_from_pretrained(self) -> None: + self.pretrained_model = self.automodel_loader.from_pretrained(self.config.model, **self.config.model_kwargs) + + def _load_automodel_with_no_weights(self) -> None: + (original_model, self.config.model) = (self.config.model, self.no_weights_model) + with fast_weights_init(): + self._load_automodel_from_pretrained() + self.logger.info('\t+ Tying model weights') + self.pretrained_model.tie_weights() + self.config.model = original_model + + def _load_ovmodel_from_pretrained(self) -> None: + self.pretrained_model = self.ovmodel_class.from_pretrained(self.config.model, export=self.config.export, ov_config=self.config.openvino_config, device=self.config.device, **self.config.model_kwargs, **self.ovmodel_kwargs) + + def _load_ovmodel_with_no_weights(self) -> None: + with fast_weights_init(): + (original_model, self.config.model) = (self.config.model, self.no_weights_model) + (original_export, 
self.config.export) = (self.config.export, True) + self.logger.info('\t+ Loading no weights OVModel') + self._load_ovmodel_from_pretrained() + self.config.export = original_export + self.config.model = original_model + + @property + def is_dp_distributed(self) -> bool: + return is_torch_distributed_available() and torch.distributed.is_initialized() + + @property + def ovmodel_kwargs(self) -> Dict[str, Any]: + kwargs = {} + if self.config.task in TEXT_GENERATION_TASKS: + kwargs['use_cache'] = self.config.use_cache + kwargs['use_merged'] = self.config.use_merged + return kwargs + + def quantize_automodel(self) -> None: + self.logger.info('\t+ Attempting quantization') + self.quantized_model = f'{self.tmpdir.name}/quantized_model' + self.logger.info('\t+ Processing quantization config') + quantization_config = OVQuantizationConfig(**self.config.quantization_config) + self.logger.info('\t+ Creating quantizer') + quantizer = OVQuantizer.from_pretrained(self.pretrained_model, task=self.config.task, seed=self.config.seed) + if self.config.calibration: + self.logger.info('\t+ Generating calibration dataset') + dataset_shapes = {'dataset_size': 1, 'sequence_length': 1, **self.model_shapes} + calibration_dataset = DatasetGenerator(task=self.config.task, dataset_shapes=dataset_shapes, model_shapes=self.model_shapes)() + columns_to_be_removed = list(set(calibration_dataset.column_names) - set(quantizer._export_input_names)) + calibration_dataset = calibration_dataset.remove_columns(columns_to_be_removed) + else: + calibration_dataset = None + self.logger.info('\t+ Quantizing model') + quantizer.quantize(save_directory=self.quantized_model, quantization_config=quantization_config, calibration_dataset=calibration_dataset, remove_unused_columns=True, data_collator=None, weights_only=False, file_name=None, batch_size=1) + + def prepare_input_shapes(self, input_shapes: Dict[str, Any]) -> Dict[str, Any]: + if self.is_dp_distributed: + if input_shapes['batch_size'] % torch.distributed.get_world_size() != 0: + raise ValueError(f"Batch size {input_shapes['batch_size']} must be divisible by data parallel world size {torch.distributed.get_world_size()}") + input_shapes['batch_size'] //= torch.distributed.get_world_size() + self.input_shapes = input_shapes + return input_shapes + + def prepare_inputs(self, inputs: Dict[str, Any]) -> Dict[str, Any]: + if self.is_dp_distributed: + with Accelerator().split_between_processes(inputs=inputs, apply_padding=False) as process_inputs: + inputs = process_inputs + return inputs + + def forward(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> OrderedDict: + return self.pretrained_model.forward(**inputs, **kwargs) + + def prefill(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> OrderedDict: + return self.pretrained_model.generate(**inputs, **kwargs) + + def generate(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> OrderedDict: + return self.pretrained_model.generate(**inputs, **kwargs) + + def call(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> OrderedDict: + return self.pretrained_model(**inputs, **kwargs) + +# File: optimum-benchmark-main/optimum_benchmark/backends/openvino/config.py +from dataclasses import dataclass, field +from typing import Any, Dict, Optional +from ...import_utils import openvino_version +from ..config import BackendConfig + +@dataclass +class OVConfig(BackendConfig): + name: str = 'openvino' + version: Optional[str] = openvino_version() + _target_: str = 'optimum_benchmark.backends.openvino.backend.OVBackend' + 
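+ # The fields below mirror the ONNX Runtime backend config: export/use_cache/use_merged drive the OpenVINO export,
+ # while the reshape/half/quantization/calibration knobs are applied by OVBackend.load() and quantize_automodel().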
no_weights: bool = False + export: bool = True + use_cache: bool = True + use_merged: bool = False + openvino_config: Dict[str, Any] = field(default_factory=dict) + half: bool = False + reshape: bool = False + quantization: bool = False + quantization_config: Dict[str, Any] = field(default_factory=dict) + calibration: bool = False + calibration_config: Dict[str, Any] = field(default_factory=dict) + + def __post_init__(self): + super().__post_init__() + self.device = self.device.lower() + if self.device not in ['cpu', 'gpu']: + raise ValueError(f'OVBackend only supports CPU devices, got {self.device}') + if self.intra_op_num_threads is not None: + raise NotImplementedError('OVBackend does not support intra_op_num_threads') + if self.quantization and (not self.calibration): + raise ValueError('OpenVINO quantization requires enabling calibration.') + +# File: optimum-benchmark-main/optimum_benchmark/backends/openvino/utils.py +TASKS_TO_OVMODEL = {'fill-mask': 'optimum.intel.openvino.OVModelForMaskedLM', 'text-generation': 'optimum.intel.openvino.OVModelForCausalLM', 'text2text-generation': 'optimum.intel.openvino.OVModelForSeq2SeqLM', 'feature-extraction': 'optimum.intel.openvino.OVModelForFeatureExtraction', 'text-classification': 'optimum.intel.openvino.OVModelForSequenceClassification', 'token-classification': 'optimum.intel.openvino.OVModelForTokenClassification', 'question-answering': 'optimum.intel.openvino.OVModelForQuestionAnswering', 'image-classification': 'optimum.intel.openvino.OVModelForImageClassification', 'audio-classification': 'optimum.intel.openvino.OVModelForAudioClassification', 'pix2struct': 'optimum.intel.openvino.OVModelForPix2Struct'} +TASKS_TO_MODEL_TYPES_TO_OVPIPELINE = {'text-to-image': {'lcm': 'optimum.intel.openvino.OVLatentConsistencyModelPipeline', 'stable-diffusion': 'optimum.intel.openvino.OVStableDiffusionPipeline', 'stable-diffusion-xl': 'optimum.intel.openvino.OVStableDiffusionXLPipeline'}} + +# File: optimum-benchmark-main/optimum_benchmark/backends/py_txi/backend.py +import os +from tempfile import TemporaryDirectory +from typing import Any, Dict, List +import torch +from accelerate import init_empty_weights +from py_txi import TEI, TGI, TEIConfig, TGIConfig +from safetensors.torch import save_file +from ...task_utils import TEXT_EMBEDDING_TASKS, TEXT_GENERATION_TASKS +from ..base import Backend +from ..transformers_utils import fast_weights_init +from .config import PyTXIConfig + +class PyTXIBackend(Backend[PyTXIConfig]): + NAME: str = 'py-txi' + + def __init__(self, config: PyTXIConfig) -> None: + super().__init__(config) + + def load(self) -> None: + self.logger.info('\t+ Creating backend temporary directory') + self.tmpdir = TemporaryDirectory() + if self.config.no_weights: + self.logger.info('\t+ Creating no weights model') + self.create_no_weights_model() + self.logger.info('\t+ Loading no weights model') + self.load_model_with_no_weights() + else: + self.logger.info('\t+ Downloading pretrained model') + self.download_pretrained_model() + if self.config.task in TEXT_GENERATION_TASKS: + self.logger.info('\t+ Preparing generation config') + self.prepare_generation_config() + self.logger.info('\t+ Loading pretrained model') + self.load_model_from_pretrained() + self.tmpdir.cleanup() + + @property + def volume(self) -> str: + return list(self.config.volumes.keys())[0] + + def download_pretrained_model(self) -> None: + with init_empty_weights(include_buffers=True): + self.automodel_loader.from_pretrained(self.config.model, **self.config.model_kwargs, 
cache_dir=self.volume) + + def prepare_generation_config(self) -> None: + self.generation_config.eos_token_id = None + self.generation_config.pad_token_id = None + model_cache_folder = f'models/{self.config.model}'.replace('/', '--') + model_cache_path = f'{self.volume}/{model_cache_folder}' + snapshot_file = f"{model_cache_path}/refs/{self.config.model_kwargs.get('revision', 'main')}" + snapshot_ref = open(snapshot_file, 'r').read().strip() + model_snapshot_path = f'{model_cache_path}/snapshots/{snapshot_ref}' + self.logger.info('\t+ Saving new pretrained generation config') + self.generation_config.save_pretrained(save_directory=model_snapshot_path) + + def create_no_weights_model(self) -> None: + self.no_weights_model = os.path.join(self.tmpdir.name, 'no_weights_model') + self.logger.info('\t+ Creating no weights model directory') + os.makedirs(self.no_weights_model, exist_ok=True) + self.logger.info('\t+ Creating no weights model state dict') + state_dict = torch.nn.Linear(1, 1).state_dict() + self.logger.info('\t+ Saving no weights model safetensors') + safetensor = os.path.join(self.no_weights_model, 'model.safetensors') + save_file(tensors=state_dict, filename=safetensor, metadata={'format': 'pt'}) + self.logger.info('\t+ Saving no weights model pretrained config') + self.pretrained_config.save_pretrained(save_directory=self.no_weights_model) + self.logger.info('\t+ Saving no weights model pretrained processor') + self.pretrained_processor.save_pretrained(save_directory=self.no_weights_model) + self.logger.info(f'\t+ Loading no weights model from {self.no_weights_model}') + with fast_weights_init(): + self.pretrained_model = self.automodel_loader.from_pretrained(self.no_weights_model, **self.config.model_kwargs, device_map='auto', _fast_init=False) + self.logger.info('\t+ Saving no weights model') + self.pretrained_model.save_pretrained(save_directory=self.no_weights_model) + del self.pretrained_model + torch.cuda.empty_cache() + if self.config.task in TEXT_GENERATION_TASKS: + self.logger.info('\t+ Modifying generation config for fixed length generation') + self.generation_config.eos_token_id = None + self.generation_config.pad_token_id = None + self.logger.info('\t+ Saving new pretrained generation config') + self.generation_config.save_pretrained(save_directory=self.no_weights_model) + + def load_model_with_no_weights(self) -> None: + (original_volumes, self.config.volumes) = (self.config.volumes, {self.tmpdir.name: {'bind': '/data', 'mode': 'rw'}}) + (original_model, self.config.model) = (self.config.model, '/data/no_weights_model') + self.logger.info('\t+ Loading no weights model') + self.load_model_from_pretrained() + (self.config.model, self.config.volumes) = (original_model, original_volumes) + + def load_model_from_pretrained(self) -> None: + if self.config.task in TEXT_GENERATION_TASKS: + self.pretrained_model = TGI(config=TGIConfig(model_id=self.config.model, gpus=self.config.gpus, devices=self.config.devices, volumes=self.config.volumes, environment=self.config.environment, ports=self.config.ports, dtype=self.config.dtype, sharded=self.config.sharded, quantize=self.config.quantize, num_shard=self.config.num_shard, speculate=self.config.speculate, cuda_graphs=self.config.cuda_graphs, disable_custom_kernels=self.config.disable_custom_kernels, trust_remote_code=self.config.trust_remote_code, max_concurrent_requests=self.config.max_concurrent_requests)) + elif self.config.task in TEXT_EMBEDDING_TASKS: + self.pretrained_model = TEI(config=TEIConfig(model_id=self.config.model, 
gpus=self.config.gpus, devices=self.config.devices, volumes=self.config.volumes, environment=self.config.environment, ports=self.config.ports, dtype=self.config.dtype, pooling=self.config.pooling, max_concurrent_requests=self.config.max_concurrent_requests)) + else: + raise NotImplementedError(f'TXI does not support task {self.config.task}') + + def prepare_inputs(self, inputs: Dict[str, Any]) -> Dict[str, Any]: + if self.config.task in TEXT_GENERATION_TASKS: + inputs = {'prompt': self.pretrained_processor.batch_decode(inputs['input_ids'].tolist())} + elif self.config.task in TEXT_EMBEDDING_TASKS: + inputs = {'text': self.pretrained_processor.batch_decode(inputs['input_ids'].tolist())} + else: + raise NotImplementedError(f'TXI does not support task {self.config.task}') + return inputs + + def forward(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> List[str]: + return self.pretrained_model.encode(**inputs, **kwargs) + + def prefill(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> Dict[str, Any]: + return self.pretrained_model.generate(**inputs, do_sample=kwargs.get('do_sample', False), max_new_tokens=kwargs.get('max_new_tokens')) + + def generate(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> List[str]: + return self.pretrained_model.generate(**inputs, do_sample=kwargs.get('do_sample', False), max_new_tokens=kwargs.get('max_new_tokens')) + +# File: optimum-benchmark-main/optimum_benchmark/backends/py_txi/config.py +import os +from dataclasses import dataclass, field +from typing import Any, Dict, List, Optional, Union +from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE +from ...import_utils import py_txi_version +from ...system_utils import is_nvidia_system, is_rocm_system +from ...task_utils import TEXT_EMBEDDING_TASKS, TEXT_GENERATION_TASKS +from ..config import BackendConfig + +@dataclass +class PyTXIConfig(BackendConfig): + name: str = 'py-txi' + version: Optional[str] = py_txi_version() + _target_: str = 'optimum_benchmark.backends.py_txi.backend.PyTXIBackend' + no_weights: bool = False + image: Optional[str] = None + shm_size: str = '1g' + devices: Optional[List[str]] = None + gpus: Optional[Union[str, int]] = None + ports: Dict[str, Any] = field(default_factory=lambda : {'80/tcp': ('127.0.0.1', 0)}, metadata={'help': 'Dictionary of ports to expose from the container.'}) + volumes: Dict[str, Any] = field(default_factory=lambda : {HUGGINGFACE_HUB_CACHE: {'bind': '/data', 'mode': 'rw'}}, metadata={'help': 'Dictionary of volumes to mount inside the container.'}) + environment: List[str] = field(default_factory=lambda : ['HUGGING_FACE_HUB_TOKEN'], metadata={'help': 'List of environment variables to forward to the container from the host.'}) + dtype: Optional[str] = None + max_concurrent_requests: Optional[int] = None + sharded: Optional[str] = None + quantize: Optional[str] = None + num_shard: Optional[int] = None + speculate: Optional[int] = None + cuda_graphs: Optional[bool] = None + disable_custom_kernels: Optional[bool] = None + trust_remote_code: Optional[bool] = None + pooling: Optional[str] = None + + def __post_init__(self): + super().__post_init__() + if self.task not in TEXT_GENERATION_TASKS + TEXT_EMBEDDING_TASKS: + raise NotImplementedError(f'TXI does not support task {self.task}') + if self.device_ids is not None and is_nvidia_system() and (self.gpus is None): + self.gpus = self.device_ids + if self.device_ids is not None and is_rocm_system() and (self.devices is None): + ids = list(map(int, self.device_ids.split(','))) + renderDs = [file 
for file in os.listdir('/dev/dri') if file.startswith('renderD')] + self.devices = ['/dev/kfd'] + [f'/dev/dri/{renderDs[i]}' for i in ids] + if self.max_concurrent_requests is None: + if self.task in TEXT_GENERATION_TASKS: + self.max_concurrent_requests = 128 + elif self.task in TEXT_EMBEDDING_TASKS: + self.max_concurrent_requests = 512 + if self.task in TEXT_GENERATION_TASKS: + if self.trust_remote_code is None: + self.trust_remote_code = self.model_kwargs.get('trust_remote_code', False) + +# File: optimum-benchmark-main/optimum_benchmark/backends/pytorch/backend.py +import os +from collections import OrderedDict +from tempfile import TemporaryDirectory +from typing import Any, Callable, Dict, List +import torch +from accelerate import Accelerator, init_empty_weights, init_on_device +from datasets import Dataset +from safetensors.torch import save_file +from transformers import AwqConfig, BitsAndBytesConfig, GPTQConfig, Trainer, TrainerCallback, TrainerState, TrainingArguments +from ...import_utils import is_deepspeed_available, is_torch_distributed_available, is_zentorch_available +from ..base import Backend +from ..peft_utils import apply_peft +from ..transformers_utils import fast_weights_init +from .config import PyTorchConfig +if is_deepspeed_available(): + import deepspeed +if is_torch_distributed_available(): + import torch.distributed +if is_zentorch_available(): + import zentorch + +class PyTorchBackend(Backend[PyTorchConfig]): + NAME = 'pytorch' + + def __init__(self, config: PyTorchConfig): + super().__init__(config) + if self.config.inter_op_num_threads is not None: + self.logger.info(f'\t+ Setting pytorch inter_op_num_threads({self.config.inter_op_num_threads})') + torch.set_num_interop_threads(self.config.inter_op_num_threads) + if self.config.intra_op_num_threads is not None: + self.logger.info(f'\t+ Setting pytorch intra_op_num_threads({self.config.intra_op_num_threads})') + torch.set_num_threads(self.config.intra_op_num_threads) + if self.config.autocast_enabled: + self.logger.info('\t+ Enabling automatic mixed precision') + torch.set_autocast_enabled(True) + if self.config.autocast_dtype is not None: + if self.config.device == 'cpu': + self.logger.info(f'\t+ Setting autocast cpu dtype to {self.config.autocast_dtype}') + torch.set_autocast_cpu_dtype(getattr(torch, self.config.autocast_dtype)) + elif self.config.device == 'cuda': + self.logger.info(f'\t+ Setting autocast gpu dtype to {self.config.autocast_dtype}') + torch.set_autocast_gpu_dtype(getattr(torch, self.config.autocast_dtype)) + else: + raise ValueError(f'Device {self.config.device} not supported for autocast') + + def load(self) -> None: + self.logger.info('\t+ Creating backend temporary directory') + self.tmpdir = TemporaryDirectory() + if self.config.library == 'transformers': + self.load_transformers_model() + elif self.config.library == 'diffusers': + self.load_diffusers_model() + elif self.config.library == 'timm': + self.load_timm_model() + else: + raise ValueError(f'Library {self.config.library} not supported for PyTorch backend') + self.logger.info('\t+ Cleaning up backend temporary directory') + self.tmpdir.cleanup() + + def load_transformers_model_from_pretrained(self) -> None: + if self.is_quantized: + self.logger.info(f'\t+ Loading {self.quantization_config.quant_method}-quantized model') + self.pretrained_model = self.automodel_loader.from_pretrained(pretrained_model_name_or_path=self.config.model, device_map=self.config.device_map or torch.device(self.config.device), **self.config.model_kwargs, 
**self.automodel_kwargs) + elif self.config.device_map is not None: + self.logger.info(f'\t+ Loading Transformers model with device map: {self.config.device_map}') + self.pretrained_model = self.automodel_loader.from_pretrained(pretrained_model_name_or_path=self.config.model, device_map=self.config.device_map, **self.config.model_kwargs, **self.automodel_kwargs) + else: + self.logger.info('\t+ Loading Transformers model') + self.pretrained_model = self.automodel_loader.from_pretrained(pretrained_model_name_or_path=self.config.model, **self.config.model_kwargs, **self.automodel_kwargs) + if self.config.device != 'cpu': + self.logger.info(f'\t+ Moving Transformers model to device: {self.config.device}') + self.pretrained_model = self.pretrained_model.to(self.config.device) + + def load_transformers_model_with_no_weights(self) -> None: + (original_model, self.config.model) = (self.config.model, self.no_weights_model) + if self.config.deepspeed_inference: + with init_empty_weights(include_buffers=False): + self.logger.info('\t+ Loading Transformers model on meta device for fast initialization') + self.pretrained_model = self.automodel_loader.from_pretrained(pretrained_model_name_or_path=self.config.model, **self.config.model_kwargs, **self.automodel_kwargs) + self.pretrained_model.to_empty(device='cpu') + elif self.config.device_map is None and (not self.is_quantized): + with init_on_device(device=torch.device(self.config.device), include_buffers=True): + self.logger.info('\t+ Loading Transformers model using device context manager for fast initialization') + self.pretrained_model = self.automodel_loader.from_pretrained(pretrained_model_name_or_path=self.no_weights_model, **self.config.model_kwargs, **self.automodel_kwargs) + else: + with fast_weights_init(): + self.load_transformers_model_from_pretrained() + self.config.model = original_model + + def load_transformers_model(self): + if self.config.deepspeed_inference and self.is_quantized: + raise ValueError('Deepspeed-Inference is not compatible with Transformers quantization') + if self.is_quantized: + self.logger.info('\t+ Processing quantization config') + self.process_quantization_config() + if self.config.no_weights: + self.logger.info('\t+ Creating no weights model') + self.create_no_weights_model() + self.logger.info('\t+ Loading model with random weights') + self.load_transformers_model_with_no_weights() + else: + self.logger.info('\t+ Loading model with pretrained weights') + self.load_transformers_model_from_pretrained() + if self.config.cache_implementation is not None: + self.logger.info(f'\t+ Setting cache implementation to {self.config.cache_implementation}') + self.pretrained_model.generation_config.cache_implementation = self.config.cache_implementation + if self.config.to_bettertransformer: + self.logger.info('\t+ To BetterTransformer') + self.pretrained_model.to_bettertransformer() + if self.config.eval_mode: + self.logger.info('\t+ Enabling eval mode') + self.pretrained_model.eval() + if self.config.peft_type is not None: + self.logger.info('\t+ Applying PEFT') + self.pretrained_model = apply_peft(self.pretrained_model, self.config.peft_type, self.config.peft_config) + if self.config.deepspeed_inference: + self.logger.info('\t+ Initializing DeepSpeed Inference Engine') + self.pretrained_model = deepspeed.init_inference(model=self.pretrained_model, config=self.config.deepspeed_inference_config) + if self.config.torch_compile: + if self.config.torch_compile_target == 'forward': + self.logger.info('\t+ Using torch.compile on 
forward') + self.pretrained_model.forward = torch.compile(self.pretrained_model.forward, **self.config.torch_compile_config) + elif self.config.torch_compile_target == 'model': + self.logger.info('\t+ Using torch.compile on model') + self.pretrained_model = torch.compile(self.pretrained_model, **self.config.torch_compile_config) + else: + raise ValueError(f'Target {self.config.torch_compile_target} not supported') + + def load_diffusers_pipeline_from_pretrained(self) -> None: + self.pretrained_model = self.automodel_loader.from_pretrained(self.config.model, device_map=self.config.device_map, **self.config.model_kwargs, **self.automodel_kwargs) + if self.config.device_map is None and self.config.device != 'cpu': + self.logger.info(f'\t+ Moving Diffusion Pipeline to device: {self.config.device}') + self.pretrained_model = self.pretrained_model.to(self.config.device) + + def load_diffusers_model(self): + self.logger.info('\t+ Loading Diffusion Pipeline') + self.logger.info(f'\t+ Using Diffusers Pipeline {self.automodel_loader.__name__}') + if self.config.no_weights: + raise ValueError('No weights model not supported for Diffusers') + else: + self.load_diffusers_pipeline_from_pretrained() + if self.config.torch_compile: + self.logger.info('\t+ Using torch.compile on unet and vae') + self.pretrained_model.unet = torch.compile(self.pretrained_model.unet, **self.config.torch_compile_config) + self.pretrained_model.vae.decode = torch.compile(self.pretrained_model.vae.decode, **self.config.torch_compile_config) + + def load_timm_model_form_pretrained(self) -> None: + self.pretrained_model = self.automodel_loader(model_name=self.config.model) + if self.config.device != 'cpu': + self.logger.info(f'\t+ Moving Timm model to device: {self.config.device}') + self.pretrained_model = self.pretrained_model.to(self.config.device) + + def load_timm_model(self): + self.logger.info('\t+ Loading Timm model') + self.logger.info(f"\t+ Using Timm's {self.automodel_loader.__name__}") + if self.config.no_weights: + raise ValueError('No weights model not supported for Timm') + else: + self.load_timm_model_form_pretrained() + if self.config.torch_compile: + if self.config.torch_compile_target == 'forward': + self.logger.info('\t+ Using torch.compile on forward') + self.pretrained_model.forward = torch.compile(self.pretrained_model.forward, **self.config.torch_compile_config) + elif self.config.torch_compile_target == 'model': + self.logger.info('\t+ Using torch.compile on model') + self.pretrained_model = torch.compile(self.pretrained_model, **self.config.torch_compile_config) + else: + raise ValueError(f'Target {self.config.torch_compile_target} not supported') + + def create_no_weights_model(self) -> None: + if self.pretrained_config is None: + raise ValueError("Can't create no weights model without a pretrained config") + self.no_weights_model = os.path.join(self.tmpdir.name, 'no_weights_model') + self.logger.info('\t+ Creating no weights model directory') + os.makedirs(self.no_weights_model, exist_ok=True) + self.logger.info('\t+ Creating no weights model state dict') + state_dict = torch.nn.Linear(1, 1).state_dict() + if self.is_exllamav2: + self.logger.info('\t+ Adding g_idx to no weights model state dict') + with init_empty_weights(include_buffers=False): + meta_model = self.automodel_loader.from_config(self.pretrained_config) + for (name, module) in meta_model.named_modules(): + if hasattr(module, 'in_features'): + state_dict[name + '.g_idx'] = torch.ones((module.in_features,), dtype=torch.int32) + 
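+ # (g_idx above) The exllamav2 kernels look up a g_idx buffer for each quantized linear layer; the dummy int32 tensors
+ # added above for every module exposing in_features keep the randomly-initialized (no-weights) checkpoint loadable.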
self.logger.info('\t+ Saving no weights model safetensors') + safetensors = os.path.join(self.no_weights_model, 'model.safetensors') + save_file(tensors=state_dict, filename=safetensors, metadata={'format': 'pt'}) + if self.is_quantized: + self.logger.info("\t+ Adding quantization config to no weights model's pretrained config") + self.pretrained_config.quantization_config = self.quantization_config.to_dict() + self.logger.info('\t+ Saving no weights model pretrained config') + self.pretrained_config.save_pretrained(save_directory=self.no_weights_model) + + def process_quantization_config(self) -> None: + if self.is_gptq_quantized: + self.logger.info('\t+ Processing GPTQ config') + try: + import exllamav2_kernels + except ImportError: + raise ImportError('Tried to import `exllamav2_kernels` but failed. This means that the AutoGPTQ package is either not installed or not compiled with the right torch version. Please install it from source following the instructions at `https://github.com/AutoGPTQ/AutoGPTQ`Or use `python scripts/install_quantization_libs.py --install-autogptq-from-source` in `optimum-benchmark` repository at `https://github.com/huggingface/optimum-benchmark`.') + self.quantization_config = GPTQConfig(**dict(getattr(self.pretrained_config, 'quantization_config', {}), **self.config.quantization_config)) + elif self.is_awq_quantized: + self.logger.info('\t+ Processing AWQ config') + try: + import exlv2_ext + except ImportError: + raise ImportError('Tried to import `exlv2_ext` but failed. This means that the AutoAWQ package is either not installed or not compiled with the right torch version. Please install it from source following the instructions at `https://github.com/casper-hansen/AutoAWQ`Or use `python scripts/install_quantization_libs.py --install-autoawq-from-source` in `optimum-benchmark` repository at `https://github.com/huggingface/optimum-benchmark`.') + self.quantization_config = AwqConfig(**dict(getattr(self.pretrained_config, 'quantization_config', {}), **self.config.quantization_config)) + elif self.is_bnb_quantized: + self.logger.info('\t+ Processing BitsAndBytes config') + self.quantization_config = BitsAndBytesConfig(**dict(getattr(self.pretrained_config, 'quantization_config', {}), **self.config.quantization_config)) + else: + raise ValueError(f'Quantization scheme {self.config.quantization_scheme} not recognized') + + @property + def is_distributed(self) -> bool: + return is_torch_distributed_available() and torch.distributed.is_initialized() + + @property + def is_tp_distributed(self) -> bool: + return self.is_distributed and self.config.deepspeed_inference + + @property + def is_dp_distributed(self) -> bool: + return self.is_distributed and (not self.config.deepspeed_inference) + + @property + def is_quantized(self) -> bool: + return self.config.quantization_scheme is not None or (hasattr(self.pretrained_config, 'quantization_config') and self.pretrained_config.quantization_config.get('quant_method', None) is not None) + + @property + def is_bnb_quantized(self) -> bool: + return self.config.quantization_scheme == 'bnb' or (hasattr(self.pretrained_config, 'quantization_config') and self.pretrained_config.quantization_config.get('quant_method', None) == 'bnb') + + @property + def is_gptq_quantized(self) -> bool: + return self.config.quantization_scheme == 'gptq' or (hasattr(self.pretrained_config, 'quantization_config') and self.pretrained_config.quantization_config.get('quant_method', None) == 'gptq') + + @property + def is_awq_quantized(self) -> bool: + 
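+ # Same pattern as is_gptq_quantized above: true when the backend config requests the scheme explicitly
+ # or when the checkpoint's own quantization_config declares it.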
return self.config.quantization_scheme == 'awq' or (hasattr(self.pretrained_config, 'quantization_config') and self.pretrained_config.quantization_config.get('quant_method', None) == 'awq') + + @property + def is_exllamav2(self) -> bool: + return self.is_quantized and (self.is_gptq_quantized or self.is_awq_quantized) and (hasattr(self.pretrained_config, 'quantization_config') and hasattr(self.pretrained_config.quantization_config, 'exllama_config') and (self.pretrained_config.quantization_config.exllama_config.get('version', None) == 2) or ('exllama_config' in self.config.quantization_config and self.config.quantization_config['exllama_config'].get('version', None) == 2)) + + @property + def automodel_kwargs(self) -> Dict[str, Any]: + kwargs = {} + if self.config.torch_dtype is not None: + kwargs['torch_dtype'] = getattr(torch, self.config.torch_dtype) + if self.is_quantized: + kwargs['quantization_config'] = self.quantization_config + if self.config.attn_implementation is not None: + kwargs['attn_implementation'] = self.config.attn_implementation + if self.config.low_cpu_mem_usage is not None: + kwargs['low_cpu_mem_usage'] = self.config.low_cpu_mem_usage + if self.config.no_weights: + kwargs['_fast_init'] = False + return kwargs + + def prepare_input_shapes(self, input_shapes: Dict[str, Any]) -> Dict[str, Any]: + if self.is_dp_distributed: + if input_shapes['batch_size'] % torch.distributed.get_world_size() != 0: + raise ValueError(f"Batch size {input_shapes['batch_size']} must be divisible by data parallel world size {torch.distributed.get_world_size()}") + input_shapes['batch_size'] //= torch.distributed.get_world_size() + if self.is_tp_distributed: + if torch.distributed.get_rank() != 0: + input_shapes['batch_size'] = 0 + return input_shapes + + def prepare_inputs(self, inputs: Dict[str, Any]) -> Dict[str, Any]: + if self.is_dp_distributed: + with Accelerator().split_between_processes(inputs=inputs, apply_padding=False) as process_inputs: + inputs = process_inputs + if self.config.library == 'timm': + inputs = {'x': inputs['pixel_values']} + for (key, value) in inputs.items(): + if isinstance(value, torch.Tensor): + inputs[key] = value.to(self.config.device) + return inputs + + @torch.inference_mode() + def forward(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> OrderedDict: + return self.pretrained_model.forward(**inputs, **kwargs) + + @torch.inference_mode() + def prefill(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> OrderedDict: + assert kwargs.get('max_new_tokens') == kwargs.get('min_new_tokens') == 1, 'For prefilling, max_new_tokens and min_new_tokens must be equal to 1' + return self.pretrained_model.generate(**inputs, **kwargs) + + @torch.inference_mode() + def generate(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> OrderedDict: + return self.pretrained_model.generate(**inputs, **kwargs) + + @torch.inference_mode() + def call(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> OrderedDict: + return self.pretrained_model(**inputs, **kwargs) + + def train(self, training_dataset: Dataset, training_arguments: Dict[str, Any], training_callbacks: List[TrainerCallback], training_data_collator: Callable[[List[Dict[str, Any]]], Dict[str, Any]]) -> TrainerState: + self.logger.info(f'\t+ Wrapping training arguments with {TrainingArguments.__name__}') + training_arguments['use_cpu'] = self.config.device == 'cpu' + training_arguments = TrainingArguments(**training_arguments) + self.logger.info(f'\t+ Wrapping model with {Trainer.__name__}') + trainer = 
Trainer(args=training_arguments, model=self.pretrained_model, callbacks=training_callbacks, train_dataset=training_dataset, data_collator=training_data_collator) + self.logger.info('\t+ Starting training') + trainer.train() + self.logger.info('\t+ Finished training') + +# File: optimum-benchmark-main/optimum_benchmark/backends/pytorch/config.py +from dataclasses import dataclass, field +from typing import Any, Dict, Optional +from ...import_utils import torch_version +from ...system_utils import is_rocm_system +from ..config import BackendConfig +DEVICE_MAPS = ['auto', 'sequential'] +AMP_DTYPES = ['bfloat16', 'float16'] +TORCH_DTYPES = ['bfloat16', 'float16', 'float32', 'auto'] +QUANTIZATION_CONFIGS = {'bnb': {'llm_int8_threshold': 0.0}, 'gptq': {}, 'awq': {}} + +@dataclass +class PyTorchConfig(BackendConfig): + name: str = 'pytorch' + version: Optional[str] = torch_version() + _target_: str = 'optimum_benchmark.backends.pytorch.backend.PyTorchBackend' + no_weights: bool = False + device_map: Optional[str] = None + torch_dtype: Optional[str] = None + eval_mode: bool = True + to_bettertransformer: bool = False + low_cpu_mem_usage: Optional[bool] = None + attn_implementation: Optional[str] = None + cache_implementation: Optional[str] = None + autocast_enabled: bool = False + autocast_dtype: Optional[str] = None + torch_compile: bool = False + torch_compile_target: str = 'forward' + torch_compile_config: Dict[str, Any] = field(default_factory=dict) + quantization_scheme: Optional[str] = None + quantization_config: Dict[str, Any] = field(default_factory=dict) + deepspeed_inference: bool = False + deepspeed_inference_config: Dict[str, Any] = field(default_factory=dict) + peft_type: Optional[str] = None + peft_config: Dict[str, Any] = field(default_factory=dict) + + def __post_init__(self): + super().__post_init__() + if self.model_kwargs.get('torch_dtype', None) is not None: + raise ValueError('`torch_dtype` is an explicit argument in the PyTorch backend config. Please remove it from the `model_kwargs` and set it in the backend config directly.') + if self.device_map is not None and self.device_map not in DEVICE_MAPS: + raise ValueError(f'`device_map` must be one of {DEVICE_MAPS}. Got {self.device_map} instead.') + if self.torch_dtype is not None and self.torch_dtype not in TORCH_DTYPES: + raise ValueError(f'`torch_dtype` must be one of {TORCH_DTYPES}. Got {self.torch_dtype} instead.') + if self.autocast_dtype is not None and self.autocast_dtype not in AMP_DTYPES: + raise ValueError(f'`autocast_dtype` must be one of {AMP_DTYPES}. Got {self.autocast_dtype} instead.') + if self.quantization_scheme is not None: + if self.quantization_scheme not in QUANTIZATION_CONFIGS: + raise ValueError(f'`quantization_scheme` must be one of {list(QUANTIZATION_CONFIGS.keys())}. Got {self.quantization_scheme} instead.') + if self.quantization_scheme == 'bnb' and is_rocm_system(): + raise ValueError('BitsAndBytes is not supported on ROCm GPUs. 
Please disable it.') + if self.quantization_config: + QUANTIZATION_CONFIG = QUANTIZATION_CONFIGS[self.quantization_scheme] + self.quantization_config = {**QUANTIZATION_CONFIG, **self.quantization_config} + +# File: optimum-benchmark-main/optimum_benchmark/backends/tensorrt_llm/backend.py +from collections import OrderedDict +from tempfile import TemporaryDirectory +from typing import Any, Dict +from hydra.utils import get_class +from ..base import Backend +from .config import TRTLLMConfig +from .utils import MODEL_TYPE_TO_TRTLLMMODEL + +class TRTLLMBackend(Backend[TRTLLMConfig]): + NAME = 'tensorrt-llm' + + def __init__(self, config: TRTLLMConfig): + super().__init__(config) + if self.config.model_type in MODEL_TYPE_TO_TRTLLMMODEL: + self.trtllm_loader = get_class(MODEL_TYPE_TO_TRTLLMMODEL[self.config.model_type]) + self.logger.info(f'\t+ Using TRTLLMModel class {self.trtllm_loader.__name__}') + else: + raise NotImplementedError(f'TRTLLMBackend does not support model_type {self.config.model_type}') + + def load(self) -> None: + self.logger.info('\t+ Creating backend temporary directory') + self.tmpdir = TemporaryDirectory() + self.logger.info('\t+ Loading pretrained TRTLLMModel') + self.load_trtmodel_from_pretrained() + self.logger.info('\t+ Cleaning up backend temporary directory') + self.tmpdir.cleanup() + + def load_trtmodel_from_pretrained(self) -> None: + self.pretrained_model = self.trtllm_loader.from_pretrained(self.config.model, tp=self.config.tp, pp=self.config.pp, dtype=self.config.dtype, use_fp8=self.config.use_fp8, world_size=self.config.world_size, gpus_per_node=self.config.gpus_per_node, use_cuda_graph=self.config.use_cuda_graph, optimization_level=self.config.optimization_level, max_prompt_length=self.config.max_prompt_length, max_batch_size=self.config.max_batch_size, max_new_tokens=self.config.max_new_tokens, max_beam_width=self.config.max_beam_width, **self.config.model_kwargs) + + def prefill(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> OrderedDict: + return self.pretrained_model.generate(input_ids=inputs.get('input_ids'), attention_mask=inputs.get('attention_mask'), min_length=kwargs.get('min_new_tokens', -1), max_new_tokens=kwargs.get('max_new_tokens', -1), repetition_penalty=kwargs.get('repetition_penalty', 1.0), length_penalty=kwargs.get('length_penalty', 1.0), pad_token_id=kwargs.get('pad_token_id', 0), bos_token_id=kwargs.get('bos_token_id', 1), eos_token_id=kwargs.get('eos_token_id', 2), temperature=kwargs.get('temperature', 1.0), num_beams=kwargs.get('num_beams', 1), top_p=kwargs.get('top_p', 1.0), top_k=kwargs.get('top_k', 50), seed=kwargs.get('seed', 42)) + + def generate(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> OrderedDict: + return self.pretrained_model.generate(input_ids=inputs.get('input_ids'), attention_mask=inputs.get('attention_mask'), min_length=kwargs.get('min_new_tokens', -1), max_new_tokens=kwargs.get('max_new_tokens', -1), repetition_penalty=kwargs.get('repetition_penalty', 1.0), length_penalty=kwargs.get('length_penalty', 1.0), pad_token_id=kwargs.get('pad_token_id', 0), bos_token_id=kwargs.get('bos_token_id', 1), eos_token_id=kwargs.get('eos_token_id', 2), temperature=kwargs.get('temperature', 1.0), num_beams=kwargs.get('num_beams', 1), top_p=kwargs.get('top_p', 1.0), top_k=kwargs.get('top_k', 50), seed=kwargs.get('seed', 42)) + +# File: optimum-benchmark-main/optimum_benchmark/backends/tensorrt_llm/config.py +from dataclasses import dataclass +from typing import Optional +from ...import_utils import 
tesnorrt_llm_version +from ..config import BackendConfig +SUPPORTED_DTYPES = ['float16', 'bfloat16', 'float32'] + +@dataclass +class TRTLLMConfig(BackendConfig): + name: str = 'tensorrt-llm' + version: Optional[str] = tesnorrt_llm_version() + _target_: str = 'optimum_benchmark.backends.tensorrt_llm.backend.TRTLLMBackend' + tp: int = 1 + pp: int = 1 + use_fp8: bool = False + dtype: str = 'float16' + optimization_level: int = 2 + use_cuda_graph: bool = False + world_size: int = 1 + gpus_per_node: int = 1 + max_prompt_length: int = 128 + max_new_tokens: int = -1 + max_batch_size: int = 1 + max_beam_width: int = 1 + + def __post_init__(self) -> None: + super().__post_init__() + if self.device != 'cuda': + raise NotImplementedError(f'TRTLLMBackend only supports device cuda, got {self.device}') + if self.dtype not in SUPPORTED_DTYPES: + raise ValueError(f'dtype must be one of float16, bfloat16, float32, got {self.dtype}') + if self.gpus_per_node != self.world_size: + raise ValueError(f'gpus_per_node ({self.gpus_per_node}) != world_size ({self.world_size})') + if self.world_size != self.pp * self.tp: + raise ValueError(f'world_size ({self.world_size}) != pp ({self.pp}) * tp ({self.tp})') + +# File: optimum-benchmark-main/optimum_benchmark/backends/timm_utils.py +from typing import Any, Dict +from transformers import PretrainedConfig +from ..import_utils import is_timm_available +if is_timm_available(): + from timm import create_model + from timm.models import get_pretrained_cfg, load_model_config_from_hf, parse_model_name + +def get_timm_pretrained_config(model_name: str) -> PretrainedConfig: + (model_source, model_name) = parse_model_name(model_name) + if model_source == 'hf-hub': + (pretrained_cfg, model_name) = load_model_config_from_hf(model_name) + return pretrained_cfg + return get_pretrained_cfg(model_name) + +def extract_timm_shapes_from_config(config: PretrainedConfig) -> Dict[str, Any]: + artifacts_dict = {} + config_dict = {k: v for (k, v) in config.to_dict().items() if v is not None} + artifacts_dict.update(config_dict) + shapes = {} + shapes['num_channels'] = artifacts_dict.get('num_channels', None) + if shapes['num_channels'] is None: + shapes['num_channels'] = artifacts_dict.get('channels', None) + image_size = artifacts_dict.get('image_size', None) + if image_size is None: + image_size = artifacts_dict.get('size', None) + if isinstance(image_size, (int, float)): + shapes['height'] = image_size + shapes['width'] = image_size + elif isinstance(image_size, (list, tuple)): + shapes['height'] = image_size[0] + shapes['width'] = image_size[0] + elif isinstance(image_size, dict) and len(image_size) == 2: + shapes['height'] = list(image_size.values())[0] + shapes['width'] = list(image_size.values())[1] + elif isinstance(image_size, dict) and len(image_size) == 1: + shapes['height'] = list(image_size.values())[0] + shapes['width'] = list(image_size.values())[0] + else: + shapes['height'] = None + shapes['width'] = None + input_size = artifacts_dict.get('input_size', None) + if input_size is not None: + shapes['num_channels'] = input_size[0] + shapes['height'] = input_size[1] + shapes['width'] = input_size[2] + id2label = artifacts_dict.get('id2label', None) + if id2label is not None: + shapes['num_labels'] = len(id2label) + num_classes = artifacts_dict.get('num_classes', None) + if num_classes is not None: + shapes['num_labels'] = num_classes + return shapes + +def get_timm_automodel_loader(): + return create_model + +# File: 
optimum-benchmark-main/optimum_benchmark/backends/torch_ort/backend.py +from tempfile import TemporaryDirectory +from typing import Any, Callable, Dict, List +import torch +from datasets import Dataset +from optimum.onnxruntime import ORTTrainer, ORTTrainingArguments +from transformers import TrainerCallback +from ..base import Backend +from ..peft_utils import apply_peft +from ..transformers_utils import fast_weights_init +from .config import TorchORTConfig + +class TorchORTBackend(Backend[TorchORTConfig]): + NAME: str = 'torch-ort' + + def __init__(self, config: TorchORTConfig): + super().__init__(config) + + def load(self) -> None: + self.logger.info('\t+ Creating backend temporary directory') + self.tmpdir = TemporaryDirectory() + if self.config.no_weights: + self.logger.info('\t+ Creating no weights AutoModel') + self.create_no_weights_model() + self.logger.info('\t+ Loading no weights AutoModel') + self.load_automodel_with_no_weights() + else: + self.logger.info('\t+ Loading pretrained AutoModel') + self.load_automodel_from_pretrained() + if self.config.peft_type is not None: + self.logger.info('\t+ Applying PEFT') + self.pretrained_model = apply_peft(self.pretrained_model, self.config.peft_type, self.config.peft_config) + self.logger.info('\t+ Cleaning up backend temporary directory') + self.tmpdir.cleanup() + + def load_automodel_with_no_weights(self) -> None: + (original_model, self.config.model) = (self.config.model, self.no_weights_model) + with fast_weights_init(): + self.load_automodel_from_pretrained() + self.logger.info('\t+ Tying model weights') + self.pretrained_model.tie_weights() + self.config.model = original_model + + def load_automodel_from_pretrained(self) -> None: + self.pretrained_model = self.automodel_loader.from_pretrained(self.config.model, **self.automodel_kwargs, **self.config.model_kwargs).to(self.config.device) + + @property + def automodel_kwargs(self) -> Dict[str, Any]: + kwargs = {} + if self.config.torch_dtype is not None: + kwargs['torch_dtype'] = getattr(torch, self.config.torch_dtype) + if self.config.attn_implementation is not None: + kwargs['attn_implementation'] = self.config.attn_implementation + return kwargs + + def train(self, training_dataset: Dataset, training_arguments: Dict[str, Any], training_callbacks: List[TrainerCallback], training_data_collator: Callable[[List[Dict[str, Any]]], Dict[str, Any]]): + self.logger.info(f'\t+ Wrapping training arguments with {ORTTrainingArguments.__name__}') + training_arguments = ORTTrainingArguments(**training_arguments) + self.logger.info(f'\t+ Wrapping model with {ORTTrainer.__name__}') + trainer = ORTTrainer(model=self.pretrained_model, args=training_arguments, callbacks=training_callbacks, train_dataset=training_dataset, data_collator=training_data_collator) + self.logger.info('\t+ Starting training') + trainer.train() + self.logger.info('\t+ Finished training') + +# File: optimum-benchmark-main/optimum_benchmark/backends/torch_ort/config.py +from dataclasses import dataclass, field +from typing import Any, Dict, Optional +from ...import_utils import torch_ort_version +from ..config import BackendConfig + +@dataclass +class TorchORTConfig(BackendConfig): + name: str = 'torch-ort' + version: Optional[str] = torch_ort_version() + _target_: str = 'optimum_benchmark.backends.torch_ort.backend.TorchORTBackend' + no_weights: bool = False + torch_dtype: Optional[str] = None + attn_implementation: Optional[str] = 'eager' + peft_type: Optional[str] = None + peft_config: Dict[str, Any] = 
field(default_factory=dict) + + def __post_init__(self): + super().__post_init__() + if self.device != 'cuda': + raise ValueError(f'TorchORTBackend only supports CUDA devices, got {self.device}') + +# File: optimum-benchmark-main/optimum_benchmark/backends/transformers_utils.py +import warnings +from contextlib import contextmanager +from typing import Any, Dict, Optional, Union +import torch +import transformers +from transformers import AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, FeatureExtractionMixin, GenerationConfig, ImageProcessingMixin, PretrainedConfig, ProcessorMixin, SpecialTokensMixin +from ..import_utils import is_torch_available +TASKS_TO_MODEL_LOADERS = {'feature-extraction': 'AutoModel', 'fill-mask': 'AutoModelForMaskedLM', 'multiple-choice': 'AutoModelForMultipleChoice', 'question-answering': 'AutoModelForQuestionAnswering', 'token-classification': 'AutoModelForTokenClassification', 'text-classification': 'AutoModelForSequenceClassification', 'audio-xvector': 'AutoModelForAudioXVector', 'text-to-audio': 'AutoModelForTextToSpectrogram', 'audio-classification': 'AutoModelForAudioClassification', 'audio-frame-classification': 'AutoModelForAudioFrameClassification', 'mask-generation': 'AutoModel', 'image-to-image': 'AutoModelForImageToImage', 'masked-im': 'AutoModelForMaskedImageModeling', 'object-detection': 'AutoModelForObjectDetection', 'depth-estimation': 'AutoModelForDepthEstimation', 'image-segmentation': 'AutoModelForImageSegmentation', 'image-classification': 'AutoModelForImageClassification', 'semantic-segmentation': 'AutoModelForSemanticSegmentation', 'zero-shot-object-detection': 'AutoModelForZeroShotObjectDetection', 'zero-shot-image-classification': 'AutoModelForZeroShotImageClassification', 'image-to-text': 'AutoModelForVision2Seq', 'text-generation': 'AutoModelForCausalLM', 'text2text-generation': 'AutoModelForSeq2SeqLM', 'visual-question-answering': 'AutoModelForVisualQuestionAnswering', 'automatic-speech-recognition': ('AutoModelForSpeechSeq2Seq', 'AutoModelForCTC')} +if is_torch_available(): + TASKS_TO_MODEL_TYPES_TO_MODEL_CLASSES = {} + for (task_name, model_loaders) in TASKS_TO_MODEL_LOADERS.items(): + TASKS_TO_MODEL_TYPES_TO_MODEL_CLASSES[task_name] = {} + if isinstance(model_loaders, str): + model_loaders = (model_loaders,) + for model_loader_name in model_loaders: + model_loader_class = getattr(transformers, model_loader_name) + TASKS_TO_MODEL_TYPES_TO_MODEL_CLASSES[task_name].update(model_loader_class._model_mapping._model_mapping) +else: + TASKS_TO_MODEL_TYPES_TO_MODEL_CLASSES = {} +PretrainedProcessor = Union[FeatureExtractionMixin, ImageProcessingMixin, SpecialTokensMixin, ProcessorMixin] + +def get_transformers_pretrained_config(model: str, **kwargs) -> 'PretrainedConfig': + return AutoConfig.from_pretrained(model, **kwargs) + +def get_transformers_generation_config(model: str, **kwargs) -> Optional['GenerationConfig']: + try: + return GenerationConfig.from_pretrained(model, **kwargs) + except Exception: + return GenerationConfig() + +def get_transformers_pretrained_processor(model: str, **kwargs) -> Optional['PretrainedProcessor']: + try: + return AutoProcessor.from_pretrained(model, **kwargs) + except Exception: + try: + return AutoFeatureExtractor.from_pretrained(model, **kwargs) + except Exception: + try: + return AutoTokenizer.from_pretrained(model, **kwargs) + except Exception: + return None + +def extract_transformers_shapes_from_artifacts(config: Optional['PretrainedConfig']=None, processor: 
Optional['PretrainedProcessor']=None) -> Dict[str, Any]: + artifacts_dict = {} + if config is not None and hasattr(config, 'to_dict'): + config_dict = {k: v for (k, v) in config.to_dict().items() if v is not None} + artifacts_dict.update(config_dict) + elif config is not None: + try: + config_dict = {k: getattr(config, k) for k in dir(config) if isinstance(getattr(config, k), int)} + artifacts_dict.update(config_dict) + except Exception: + warnings.warn(f'Could not extract shapes from config {config}') + if processor is not None and hasattr(processor, 'to_dict'): + processor_dict = {k: v for (k, v) in processor.to_dict().items() if v is not None} + artifacts_dict.update(processor_dict) + elif processor is not None: + try: + processor_dict = {k: getattr(processor, k) for k in dir(processor) if isinstance(getattr(processor, k), int)} + except Exception: + warnings.warn(f'Could not extract shapes from processor {processor}') + shapes = {} + shapes['vocab_size'] = artifacts_dict.get('vocab_size', None) + shapes['type_vocab_size'] = artifacts_dict.get('type_vocab_size', None) + shapes['max_position_embeddings'] = artifacts_dict.get('max_position_embeddings', None) + if shapes['max_position_embeddings'] is None: + shapes['max_position_embeddings'] = artifacts_dict.get('n_positions', None) + shapes['num_channels'] = artifacts_dict.get('num_channels', None) + if shapes['num_channels'] is None: + shapes['num_channels'] = artifacts_dict.get('channels', None) + image_size = artifacts_dict.get('image_size', None) + if image_size is None: + image_size = artifacts_dict.get('size', None) + if isinstance(image_size, (int, float)): + shapes['height'] = image_size + shapes['width'] = image_size + elif isinstance(image_size, (list, tuple)): + shapes['height'] = image_size[0] + shapes['width'] = image_size[0] + elif isinstance(image_size, dict) and len(image_size) == 2: + shapes['height'] = list(image_size.values())[0] + shapes['width'] = list(image_size.values())[1] + elif isinstance(image_size, dict) and len(image_size) == 1: + shapes['height'] = list(image_size.values())[0] + shapes['width'] = list(image_size.values())[0] + else: + shapes['height'] = None + shapes['width'] = None + input_size = artifacts_dict.get('input_size', None) + if input_size is not None: + shapes['num_channels'] = input_size[0] + shapes['height'] = input_size[1] + shapes['width'] = input_size[2] + id2label = artifacts_dict.get('id2label', None) + if id2label is not None: + shapes['num_labels'] = len(id2label) + num_classes = artifacts_dict.get('num_classes', None) + if num_classes is not None: + shapes['num_labels'] = num_classes + shapes['num_queries'] = artifacts_dict.get('num_queries', None) + if shapes['num_queries'] == 0: + shapes['num_queries'] = 2 + return shapes + +def get_transformers_automodel_loader_for_task(task: str): + model_loader_name = TASKS_TO_MODEL_LOADERS[task] + model_loader_class = getattr(transformers, model_loader_name) + return model_loader_class +TORCH_INIT_FUNCTIONS = {'normal_': torch.nn.init.normal_, 'uniform_': torch.nn.init.uniform_, 'trunc_normal_': torch.nn.init.trunc_normal_, 'xavier_normal_': torch.nn.init.xavier_normal_, 'xavier_uniform_': torch.nn.init.xavier_uniform_, 'kaiming_normal_': torch.nn.init.kaiming_normal_, 'kaiming_uniform_': torch.nn.init.kaiming_uniform_, 'normal': torch.nn.init.normal, 'uniform': torch.nn.init.uniform, 'xavier_normal': torch.nn.init.xavier_normal, 'xavier_uniform': torch.nn.init.xavier_uniform, 'kaiming_normal': torch.nn.init.kaiming_normal, 'kaiming_uniform': 
torch.nn.init.kaiming_uniform} + +def fast_random_tensor(tensor: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: + return torch.nn.init.uniform_(tensor) + +@contextmanager +def fast_weights_init(): + for (name, init_func) in TORCH_INIT_FUNCTIONS.items(): + if name != 'uniform_': + setattr(torch.nn.init, name, fast_random_tensor) + try: + yield + finally: + for (name, init_func) in TORCH_INIT_FUNCTIONS.items(): + if name != 'uniform_': + setattr(torch.nn.init, name, init_func) + +# File: optimum-benchmark-main/optimum_benchmark/backends/vllm/backend.py +import asyncio +import os +from tempfile import TemporaryDirectory +from typing import Any, Dict, Union +import torch +from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE +from safetensors.torch import save_file +from vllm import AsyncEngineArgs, AsyncLLMEngine, EngineArgs, LLMEngine, SamplingParams +from ...task_utils import TEXT_GENERATION_TASKS +from ..base import Backend +from ..transformers_utils import fast_weights_init +from .config import VLLMConfig + +class VLLMBackend(Backend[VLLMConfig]): + NAME: str = 'vllm' + pretrained_model: Union[LLMEngine, AsyncLLMEngine] + + def __init__(self, config: VLLMConfig) -> None: + super().__init__(config) + if self.config.task not in TEXT_GENERATION_TASKS: + raise NotImplementedError(f'vLLM does not support task {self.config.task}') + + def load(self) -> None: + self.logger.info('\t+ Creating backend temporary directory') + self.tmpdir = TemporaryDirectory() + if self.config.no_weights: + self.logger.info('\t+ Creating no weights model') + self.create_no_weights_model() + self.logger.info('\t+ Loading no weights model') + self.load_model_with_no_weights() + else: + self.logger.info('\t+ Downloading pretrained model') + self.download_pretrained_model() + if self.config.task in TEXT_GENERATION_TASKS: + self.logger.info('\t+ Preparing generation config') + self.prepare_generation_config() + self.logger.info('\t+ Loading pretrained model') + self.load_model_from_pretrained() + self.logger.info('\t+ Cleaning up backend temporary directory') + self.tmpdir.cleanup() + + def download_pretrained_model(self) -> None: + with torch.device('meta'): + self.automodel_loader.from_pretrained(self.config.model, **self.config.model_kwargs) + + def prepare_generation_config(self) -> None: + self.generation_config.eos_token_id = None + self.generation_config.pad_token_id = None + model_cache_folder = f'models/{self.config.model}'.replace('/', '--') + model_cache_path = f'{HUGGINGFACE_HUB_CACHE}/{model_cache_folder}' + snapshot_file = f"{model_cache_path}/refs/{self.config.model_kwargs.get('revision', 'main')}" + snapshot_ref = open(snapshot_file, 'r').read().strip() + model_snapshot_path = f'{model_cache_path}/snapshots/{snapshot_ref}' + self.logger.info('\t+ Saving new pretrained generation config') + self.generation_config.save_pretrained(save_directory=model_snapshot_path) + + def create_no_weights_model(self) -> None: + self.no_weights_model = os.path.join(self.tmpdir.name, 'no_weights_model') + self.logger.info('\t+ Creating no weights model directory') + os.makedirs(self.no_weights_model, exist_ok=True) + self.logger.info('\t+ Creating no weights model state dict') + state_dict = torch.nn.Linear(1, 1).state_dict() + self.logger.info('\t+ Saving no weights model safetensors') + safetensor = os.path.join(self.no_weights_model, 'model.safetensors') + save_file(tensors=state_dict, filename=safetensor, metadata={'format': 'pt'}) + self.logger.info('\t+ Saving no weights model pretrained config') + 
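# [Illustrative sketch, not part of the original file] fast_weights_init() above temporarily
# replaces every torch.nn.init function (except uniform_) with a cheap uniform fill, so that
# instantiating a "no weights" model does not pay for expensive random initialization.
# The same pattern in isolation, assuming only torch is installed:
import torch
from contextlib import contextmanager

@contextmanager
def cheap_init(names=("normal_", "kaiming_uniform_", "xavier_uniform_")):
    originals = {name: getattr(torch.nn.init, name) for name in names}
    try:
        for name in names:
            # every patched initializer just fills the tensor with uniform noise
            setattr(torch.nn.init, name, lambda tensor, *args, **kwargs: torch.nn.init.uniform_(tensor))
        yield
    finally:
        for name, fn in originals.items():
            setattr(torch.nn.init, name, fn)

with cheap_init():
    layer = torch.nn.Linear(2048, 2048)  # weight is filled with uniform noise instead of kaiming_uniform_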
self.pretrained_config.save_pretrained(save_directory=self.no_weights_model) + self.logger.info('\t+ Saving no weights model pretrained processor') + self.pretrained_processor.save_pretrained(save_directory=self.no_weights_model) + self.logger.info(f'\t+ Loading no weights model from {self.no_weights_model}') + with fast_weights_init(): + self.pretrained_model = self.automodel_loader.from_pretrained(self.no_weights_model, **self.config.model_kwargs, device_map='auto', _fast_init=False) + self.logger.info('\t+ Saving no weights model') + self.pretrained_model.save_pretrained(save_directory=self.no_weights_model) + del self.pretrained_model + torch.cuda.empty_cache() + if self.config.task in TEXT_GENERATION_TASKS: + self.logger.info('\t+ Modifying generation config for fixed length generation') + self.generation_config.eos_token_id = None + self.generation_config.pad_token_id = None + self.logger.info('\t+ Saving new pretrained generation config') + self.generation_config.save_pretrained(save_directory=self.no_weights_model) + + def load_model_with_no_weights(self) -> None: + (original_model, self.config.model) = (self.config.model, self.no_weights_model) + self.logger.info('\t+ Loading no weights model') + self.load_model_from_pretrained() + self.config.model = original_model + + def load_model_from_pretrained(self) -> None: + if self.config.serving_mode == 'offline': + self.pretrained_model = LLMEngine.from_engine_args(EngineArgs(**self.config.to_engine_args())) + else: + self.pretrained_model = AsyncLLMEngine.from_engine_args(AsyncEngineArgs(**self.config.to_engine_args())) + + def prepare_inputs(self, inputs: Dict[str, Any]) -> Dict[str, Any]: + if self.config.task in TEXT_GENERATION_TASKS: + inputs = {'prompts': self.pretrained_processor.batch_decode(inputs['input_ids'])} + else: + raise NotImplementedError(f'vLLM does not support task {self.config.task}') + return inputs + + def batch_offline_engine_generate(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> Any: + for (i, prompt) in enumerate(inputs['prompts']): + self.pretrained_model.add_request(inputs=prompt, request_id=str(i), params=SamplingParams(ignore_eos=True, detokenize=True, seed=self.config.seed, n=kwargs.get('num_return_sequences'), max_tokens=kwargs.get('max_new_tokens'), min_tokens=kwargs.get('min_new_tokens'), use_beam_search=kwargs.get('num_beams') > 1, logits_processors=kwargs.get('logits_processors', None))) + while self.pretrained_model.has_unfinished_requests(): + self.pretrained_model.step() + + async def single_online_engine_generate(self, prompt: str, request_id: str, kwargs: Dict[str, Any]) -> Any: + stream = await self.pretrained_model.add_request(inputs=prompt, request_id=request_id, params=SamplingParams(ignore_eos=True, detokenize=True, seed=self.config.seed, n=kwargs.get('num_return_sequences'), max_tokens=kwargs.get('max_new_tokens'), min_tokens=kwargs.get('min_new_tokens'), use_beam_search=kwargs.get('num_beams') > 1, logits_processors=kwargs.get('logits_processors', None))) + async for _ in stream: + pass + + async def batch_online_engine_generate(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> Any: + tasks = [self.single_online_engine_generate(prompt, str(i), kwargs) for (i, prompt) in enumerate(inputs['prompts'])] + await asyncio.gather(*tasks) + + def prefill(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> Dict[str, Any]: + if self.config.serving_mode == 'offline': + self.batch_offline_engine_generate(inputs, kwargs) + else: + 
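# [Illustrative sketch, not part of the original file] batch_online_engine_generate() above fans
# out one coroutine per prompt and waits for all of them with asyncio.gather. The same concurrency
# pattern in isolation; fake_generate is a stand-in for the async engine call, not a vLLM API:
import asyncio

async def fake_generate(prompt: str, request_id: str) -> str:
    await asyncio.sleep(0.01)  # stands in for streaming tokens from the async engine
    return f"request {request_id} finished for prompt: {prompt!r}"

async def batch_generate(prompts):
    tasks = [fake_generate(prompt, str(i)) for i, prompt in enumerate(prompts)]
    return await asyncio.gather(*tasks)  # all requests are in flight concurrently

print(asyncio.run(batch_generate(["hello world", "benchmarking vllm"])))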
asyncio.run(self.batch_online_engine_generate(inputs, kwargs)) + + def generate(self, inputs: Dict[str, Any], kwargs: Dict[str, Any]) -> Any: + if self.config.serving_mode == 'offline': + self.batch_offline_engine_generate(inputs, kwargs) + else: + asyncio.run(self.batch_online_engine_generate(inputs, kwargs)) + +# File: optimum-benchmark-main/optimum_benchmark/backends/vllm/config.py +from dataclasses import dataclass, field +from typing import Any, Dict, Optional +from ...import_utils import vllm_version +from ..config import BackendConfig + +@dataclass +class VLLMConfig(BackendConfig): + name: str = 'vllm' + version: Optional[str] = vllm_version() + _target_: str = 'optimum_benchmark.backends.vllm.backend.VLLMBackend' + no_weights: bool = False + serving_mode: str = 'online' + engine_args: Dict[str, Any] = field(default_factory=dict) + + def __post_init__(self): + if 'model' in self.engine_args: + raise ValueError('model should not be passed in `backend.engine_args`, use `backend.model` instead') + if 'tokenizer' in self.engine_args: + raise ValueError('tokenizer should not be passed in `backend.engine_args`, use `backend.processor` instead') + if 'device' in self.engine_args: + raise ValueError('device should not be passed in `backend.engine_args`, use `backend.device` instead') + if self.serving_mode not in ['offline', 'online']: + raise ValueError(f"Invalid serving_mode: {self.serving_mode}. Must be 'online' or 'offline'.") + self.model_kwargs = {'revision': self.engine_args.get('revision', 'main'), 'trust_remote_code': self.engine_args.get('trust_remote_code', False), **self.model_kwargs} + self.processor_kwargs = {'revision': self.engine_args.get('tokenizer_revision', 'main'), 'trust_remote_code': self.engine_args.get('trust_remote_code', False), **self.processor_kwargs} + super().__post_init__() + if self.engine_args.get('disable_log_stats', None) is None: + self.engine_args['disable_log_stats'] = True + if self.serving_mode == 'online': + if self.engine_args.get('disable_log_requests', None) is None: + self.engine_args['disable_log_requests'] = True + + def to_engine_args(self) -> Dict[str, Any]: + return dict(model=self.model, tokenizer=self.processor, device=self.device, **self.engine_args) + +# File: optimum-benchmark-main/optimum_benchmark/benchmark/base.py +from dataclasses import dataclass +from typing import TYPE_CHECKING, Type +from hydra.utils import get_class +from ..backends.config import BackendConfig +from ..hub_utils import PushToHubMixin, classproperty +from ..launchers import LauncherConfig +from ..scenarios import ScenarioConfig +from .config import BenchmarkConfig +from .report import BenchmarkReport +if TYPE_CHECKING: + from ..backends.base import Backend + from ..launchers.base import Launcher + from ..scenarios.base import Scenario + +@dataclass +class Benchmark(PushToHubMixin): + config: BenchmarkConfig + report: BenchmarkReport + + def __post_init__(self): + if isinstance(self.config, dict): + self.config = BenchmarkConfig.from_dict(self.config) + elif not isinstance(self.config, BenchmarkConfig): + raise ValueError('config must be either a dict or a BenchmarkConfig instance') + if isinstance(self.report, dict): + self.report = BenchmarkReport.from_dict(self.report) + elif not isinstance(self.report, BenchmarkReport): + raise ValueError('report must be either a dict or a BenchmarkReport instance') + + @classmethod + def launch(cls, config: BenchmarkConfig): + launcher_config: LauncherConfig = config.launcher + launcher_factory: Type[Launcher] = 
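# [Illustrative sketch, not part of the original file] Benchmark.launch() resolves the launcher
# class from its Hydra `_target_` string, and Benchmark.run() does the same for the backend and
# scenario. get_class simply imports a dotted path and returns the class; for example, with a
# stdlib class so the snippet is self-contained (assuming hydra-core is installed):
from hydra.utils import get_class

cls = get_class("collections.OrderedDict")
assert cls.__name__ == "OrderedDict"
instance = cls(a=1)  # instantiate it like any other class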
get_class(launcher_config._target_) + launcher: Launcher = launcher_factory(launcher_config) + report = launcher.launch(worker=cls.run, worker_args=[config]) + return report + + @classmethod + def run(cls, config: BenchmarkConfig): + backend_config: BackendConfig = config.backend + backend_factory: Type[Backend] = get_class(backend_config._target_) + backend: Backend = backend_factory(backend_config) + scenario_config: ScenarioConfig = config.scenario + scenario_factory: Type[Scenario] = get_class(scenario_config._target_) + scenario: Scenario = scenario_factory(scenario_config) + report = scenario.run(backend) + return report + + @classproperty + def default_filename(cls) -> str: + return 'benchmark.json' + +# File: optimum-benchmark-main/optimum_benchmark/benchmark/config.py +from dataclasses import dataclass, field +from typing import Any, Dict +from ..hub_utils import PushToHubMixin, classproperty +from ..import_utils import get_hf_libs_info +from ..system_utils import get_system_info + +@dataclass +class BenchmarkConfig(PushToHubMixin): + name: str + backend: Any + scenario: Any + launcher: Any + environment: Dict[str, Any] = field(default_factory=lambda : {**get_system_info(), **get_hf_libs_info()}) + + @classproperty + def default_filename(cls) -> str: + return 'benchmark_config.json' + +# File: optimum-benchmark-main/optimum_benchmark/benchmark/report.py +from dataclasses import dataclass, make_dataclass +from typing import Any, Dict, List, Optional +from ..hub_utils import PushToHubMixin, classproperty +from ..trackers.energy import Efficiency, Energy +from ..trackers.latency import Latency, Throughput +from ..trackers.memory import Memory + +@dataclass +class BenchmarkMeasurements: + memory: Optional[Memory] = None + latency: Optional[Latency] = None + throughput: Optional[Throughput] = None + energy: Optional[Energy] = None + efficiency: Optional[Efficiency] = None + + def __post_init__(self): + if self.memory is not None and isinstance(self.memory, dict): + self.memory = Memory(**self.memory) + if self.latency is not None and isinstance(self.latency, dict): + self.latency = Latency(**self.latency) + if self.throughput is not None and isinstance(self.throughput, dict): + self.throughput = Throughput(**self.throughput) + if self.energy is not None and isinstance(self.energy, dict): + self.energy = Energy(**self.energy) + if self.efficiency is not None and isinstance(self.efficiency, dict): + self.efficiency = Efficiency(**self.efficiency) + + @staticmethod + def aggregate(measurements: List['BenchmarkMeasurements']) -> 'BenchmarkMeasurements': + assert len(measurements) > 0, 'No measurements to aggregate' + m0 = measurements[0] + memory = Memory.aggregate([m.memory for m in measurements]) if m0.memory is not None else None + latency = Latency.aggregate([m.latency for m in measurements]) if m0.latency is not None else None + throughput = Throughput.aggregate([m.throughput for m in measurements]) if m0.throughput is not None else None + energy = Energy.aggregate([m.energy for m in measurements]) if m0.energy is not None else None + efficiency = Efficiency.aggregate([m.efficiency for m in measurements]) if m0.efficiency is not None else None + return BenchmarkMeasurements(memory, latency, throughput, energy, efficiency) + +@dataclass +class BenchmarkReport(PushToHubMixin): + + @classmethod + def from_list(cls, targets: List[str]) -> 'BenchmarkReport': + return cls.from_dict({target: None for target in targets}) + + @classmethod + def from_dict(cls, data: Dict[str, Any]) -> 
'BenchmarkReport': + return make_dataclass(cls_name=cls.__name__, fields=data.keys(), bases=(cls,))(**data) + + def __post_init__(self): + for target in self.to_dict().keys(): + if getattr(self, target) is None: + setattr(self, target, BenchmarkMeasurements()) + elif isinstance(getattr(self, target), dict): + setattr(self, target, BenchmarkMeasurements(**getattr(self, target))) + + def log_memory(self): + for target in self.to_dict().keys(): + measurements: BenchmarkMeasurements = getattr(self, target) + if measurements.memory is not None: + measurements.memory.log(prefix=target) + + def log_latency(self): + for target in self.to_dict().keys(): + measurements: BenchmarkMeasurements = getattr(self, target) + if measurements.latency is not None: + measurements.latency.log(prefix=target) + + def log_throughput(self): + for target in self.to_dict().keys(): + measurements: BenchmarkMeasurements = getattr(self, target) + if measurements.throughput is not None: + measurements.throughput.log(prefix=target) + + def log_energy(self): + for target in self.to_dict().keys(): + measurements: BenchmarkMeasurements = getattr(self, target) + if measurements.energy is not None: + measurements.energy.log(prefix=target) + + def log_efficiency(self): + for target in self.to_dict().keys(): + measurements: BenchmarkMeasurements = getattr(self, target) + if measurements.efficiency is not None: + measurements.efficiency.log(prefix=target) + + def log(self): + for target in self.to_dict().keys(): + measurements: BenchmarkMeasurements = getattr(self, target) + if measurements.memory is not None: + measurements.memory.log(prefix=target) + if measurements.latency is not None: + measurements.latency.log(prefix=target) + if measurements.throughput is not None: + measurements.throughput.log(prefix=target) + if measurements.energy is not None: + measurements.energy.log(prefix=target) + if measurements.efficiency is not None: + measurements.efficiency.log(prefix=target) + + @classmethod + def aggregate(cls, reports: List['BenchmarkReport']) -> 'BenchmarkReport': + aggregated_measurements = {} + for target in reports[0].to_dict().keys(): + measurements = [getattr(report, target) for report in reports] + aggregated_measurements[target] = BenchmarkMeasurements.aggregate(measurements) + return cls.from_dict(aggregated_measurements) + + @classproperty + def default_filename(self) -> str: + return 'benchmark_report.json' + +# File: optimum-benchmark-main/optimum_benchmark/cli.py +import glob +import os +from logging import getLogger +import hydra +from hydra.core.config_store import ConfigStore +from omegaconf import DictConfig, OmegaConf +from . 
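# [Illustrative sketch, not part of the original file] BenchmarkReport.from_dict() above builds
# the report class on the fly with dataclasses.make_dataclass, one field per benchmark target
# (for instance "prefill" and "decode" in a text-generation run; the names below are only
# examples). The same mechanism in isolation:
from dataclasses import make_dataclass

Report = make_dataclass("Report", ["prefill", "decode"])
report = Report(prefill=None, decode=None)
print(report)  # Report(prefill=None, decode=None)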
import Benchmark, BenchmarkConfig, EnergyStarConfig, INCConfig, InferenceConfig, InlineConfig, IPEXConfig, LlamaCppConfig, LLMSwarmConfig, ORTConfig, OVConfig, ProcessConfig, PyTorchConfig, PyTXIConfig, TorchORTConfig, TorchrunConfig, TrainingConfig, TRTLLMConfig, VLLMConfig +from .logging_utils import setup_logging +LOGGER = getLogger('hydra-cli') +cs = ConfigStore.instance() +cs.store(name='benchmark', node=BenchmarkConfig) +cs.store(group='backend', name=IPEXConfig.name, node=IPEXConfig) +cs.store(group='backend', name=OVConfig.name, node=OVConfig) +cs.store(group='backend', name=PyTorchConfig.name, node=PyTorchConfig) +cs.store(group='backend', name=ORTConfig.name, node=ORTConfig) +cs.store(group='backend', name=TorchORTConfig.name, node=TorchORTConfig) +cs.store(group='backend', name=TRTLLMConfig.name, node=TRTLLMConfig) +cs.store(group='backend', name=INCConfig.name, node=INCConfig) +cs.store(group='backend', name=PyTXIConfig.name, node=PyTXIConfig) +cs.store(group='backend', name=LLMSwarmConfig.name, node=LLMSwarmConfig) +cs.store(group='backend', name=VLLMConfig.name, node=VLLMConfig) +cs.store(group='backend', name=LlamaCppConfig.name, node=LlamaCppConfig) +cs.store(group='scenario', name=TrainingConfig.name, node=TrainingConfig) +cs.store(group='scenario', name=InferenceConfig.name, node=InferenceConfig) +cs.store(group='scenario', name=EnergyStarConfig.name, node=EnergyStarConfig) +cs.store(group='launcher', name=InlineConfig.name, node=InlineConfig) +cs.store(group='launcher', name=ProcessConfig.name, node=ProcessConfig) +cs.store(group='launcher', name=TorchrunConfig.name, node=TorchrunConfig) + +@hydra.main(version_base=None) +def main(config: DictConfig) -> None: + log_level = os.environ.get('LOG_LEVEL', 'INFO') + log_to_file = os.environ.get('LOG_TO_FILE', '1') == '1' + override_benchmarks = os.environ.get('OVERRIDE_BENCHMARKS', '0') == '1' + setup_logging(level=log_level, to_file=log_to_file, prefix='MAIN-PROCESS') + if glob.glob('benchmark_report.json') and (not override_benchmarks): + LOGGER.warning('Benchmark was already conducted in the current directory. If you want to override it, set the environment variable OVERRIDE_BENCHMARKS=1 (in hydra.job.env_set)') + return + benchmark_config: BenchmarkConfig = OmegaConf.to_object(config) + benchmark_config.save_json('benchmark_config.json') + benchmark_report = Benchmark.launch(benchmark_config) + benchmark_report.save_json('benchmark_report.json') + benchmark = Benchmark(config=benchmark_config, report=benchmark_report) + benchmark.save_json('benchmark.json') + +# File: optimum-benchmark-main/optimum_benchmark/generators/dataset_generator.py +from typing import Dict +from datasets import Dataset +from .task_generator import TASKS_TO_GENERATORS, TaskGenerator + +class DatasetGenerator: + task_generator: TaskGenerator + + def __init__(self, task: str, dataset_shapes: Dict[str, int], model_shapes: Dict[str, int]) -> None: + dataset_shapes['batch_size'] = dataset_shapes['dataset_size'] + if task in TASKS_TO_GENERATORS: + shapes = {**dataset_shapes, **model_shapes} + self.task_generator = TASKS_TO_GENERATORS[task](shapes=shapes, with_labels=True) + else: + raise NotImplementedError(f'Task {task} is supported. \nAvailable tasks: {list(TASKS_TO_GENERATORS.keys())}. \nIf you want to add support for this task, please submit a PR or a feature request to optimum-benchmark. 
\n') + + def __call__(self) -> Dataset: + task_dataset = self.task_generator() + task_dataset = Dataset.from_dict(task_dataset) + task_dataset.set_format(type='torch', columns=list(task_dataset.features.keys())) + return task_dataset + +# File: optimum-benchmark-main/optimum_benchmark/generators/input_generator.py +from typing import Any, Dict +from .task_generator import TASKS_TO_GENERATORS, TaskGenerator + +class InputGenerator: + task_generator: TaskGenerator + + def __init__(self, task: str, input_shapes: Dict[str, int], model_shapes: Dict[str, int]) -> None: + if task in TASKS_TO_GENERATORS: + shapes = {**input_shapes, **model_shapes} + self.task_generator = TASKS_TO_GENERATORS[task](shapes=shapes, with_labels=False) + else: + raise NotImplementedError(f'Task {task} is not supported. Available tasks: {list(TASKS_TO_GENERATORS.keys())}. If you want to add support for this task, please submit a PR or a feature request to optimum-benchmark. ') + + def __call__(self) -> Dict[str, Any]: + task_input = self.task_generator() + return task_input + +# File: optimum-benchmark-main/optimum_benchmark/generators/task_generator.py +import logging +import random +import string +from abc import ABC +from typing import List, Tuple +import torch +LOGGER = logging.getLogger('generators') +DEFAULT_NUM_LABELS = 2 +DEFAULT_VOCAB_SIZE = 2 +DEFAULT_TYPE_VOCAB_SIZE = 2 + +class TaskGenerator(ABC): + + def __init__(self, shapes, with_labels: bool): + self.shapes = shapes + self.with_labels = with_labels + + @staticmethod + def generate_random_integers(min_value: int, max_value: int, shape: Tuple[int]): + return torch.randint(min_value, max_value, shape) + + @staticmethod + def generate_random_floats(min_value: float, max_value: float, shape: Tuple[int]): + return torch.rand(shape) * (max_value - min_value) + min_value + + @staticmethod + def generate_ranges(start: int, stop: int, shape: Tuple[int]): + return torch.arange(start, stop).repeat(shape[0], 1) + + @staticmethod + def generate_random_strings(num_seq: int) -> List[str]: + return [''.join((random.choice(string.ascii_letters + string.digits) for _ in range(random.randint(10, 100)))) for _ in range(num_seq)] + + def __call__(self): + raise NotImplementedError('Generator must implement __call__ method') + +class TextGenerator(TaskGenerator): + + def input_ids(self): + return self.generate_random_integers(min_value=0, max_value=self.shapes['vocab_size'] or DEFAULT_VOCAB_SIZE, shape=(self.shapes['batch_size'], self.shapes['sequence_length'])) + + def attention_mask(self): + return self.generate_random_integers(min_value=1, max_value=2, shape=(self.shapes['batch_size'], self.shapes['sequence_length'])) + + def token_type_ids(self): + return self.generate_random_integers(min_value=0, max_value=self.shapes['type_vocab_size'] or DEFAULT_TYPE_VOCAB_SIZE, shape=(self.shapes['batch_size'], self.shapes['sequence_length'])) + + def position_ids(self): + return self.generate_ranges(start=0, stop=self.shapes['sequence_length'], shape=(self.shapes['batch_size'], self.shapes['sequence_length'])) + + def requires_token_type_ids(self): + return self.shapes['type_vocab_size'] is not None and self.shapes['type_vocab_size'] > 1 + + def requires_position_ids(self): + return self.shapes['max_position_embeddings'] is not None + +class ImageGenerator(TaskGenerator): + + def pixel_values(self): + return self.generate_random_floats(min_value=0, max_value=1, shape=(self.shapes['batch_size'], self.shapes['num_channels'], self.shapes['height'], self.shapes['width'])) + +class 
AudioGenerator(TaskGenerator): + + def input_values(self): + return self.generate_random_floats(min_value=-1, max_value=1, shape=(self.shapes['batch_size'], self.shapes['sequence_length'])) + + def input_features(self): + return self.generate_random_floats(min_value=-1, max_value=1, shape=(self.shapes['batch_size'], self.shapes['feature_size'], self.shapes['nb_max_frames'])) + +class TextClassificationGenerator(TextGenerator): + + def labels(self): + return self.generate_random_integers(min_value=0, max_value=self.shapes['num_labels'] or DEFAULT_NUM_LABELS, shape=(self.shapes['batch_size'],)) + + def __call__(self): + dummy = {} + dummy['input_ids'] = self.input_ids() + dummy['attention_mask'] = self.attention_mask() + if self.requires_token_type_ids(): + dummy['token_type_ids'] = self.token_type_ids() + if self.requires_position_ids(): + dummy['position_ids'] = self.position_ids() + if self.with_labels: + dummy['labels'] = self.labels() + return dummy + +class TokenClassificationGenerator(TextGenerator): + + def labels(self): + return self.generate_random_integers(min_value=0, max_value=self.shapes['num_labels'] or DEFAULT_NUM_LABELS, shape=(self.shapes['batch_size'], self.shapes['sequence_length'])) + + def __call__(self): + dummy = {} + dummy['input_ids'] = self.input_ids() + dummy['attention_mask'] = self.attention_mask() + if self.requires_token_type_ids(): + dummy['token_type_ids'] = self.token_type_ids() + if self.requires_position_ids(): + dummy['position_ids'] = self.position_ids() + if self.with_labels: + dummy['labels'] = self.labels() + return dummy + +class TextGenerationGenerator(TextGenerator): + + def __call__(self): + dummy = {} + dummy['input_ids'] = self.input_ids() + dummy['attention_mask'] = self.attention_mask() + if self.with_labels: + dummy['labels'] = self.input_ids() + return dummy + +class Text2TextGenerationGenerator(TextGenerator): + + def __call__(self): + dummy = {} + dummy['input_ids'] = self.input_ids() + dummy['attention_mask'] = self.attention_mask() + if self.with_labels: + dummy['labels'] = self.input_ids() + return dummy + +class QuestionAnsweringGenerator(TextGenerator): + + def start_positions(self): + return self.generate_random_integers(min_value=0, max_value=self.shapes['sequence_length'], shape=(self.shapes['batch_size'],)) + + def end_positions(self): + return self.generate_random_integers(min_value=0, max_value=self.shapes['sequence_length'], shape=(self.shapes['batch_size'],)) + + def __call__(self): + dummy = {} + dummy['input_ids'] = self.input_ids() + dummy['attention_mask'] = self.attention_mask() + dummy['token_type_ids'] = self.token_type_ids() + if self.with_labels: + dummy['start_positions'] = self.start_positions() + dummy['end_positions'] = self.end_positions() + return dummy + +class MaskedLanguageModelingGenerator(TextGenerator): + + def __call__(self): + dummy = {} + dummy['input_ids'] = self.input_ids() + dummy['attention_mask'] = self.attention_mask() + if self.requires_token_type_ids(): + dummy['token_type_ids'] = self.token_type_ids() + if self.requires_position_ids(): + dummy['position_ids'] = self.position_ids() + if self.with_labels: + dummy['labels'] = self.input_ids() + return dummy + +class MultipleChoiceGenerator(TextGenerator): + + def labels(self): + return self.generate_random_integers(min_value=0, max_value=self.shapes['num_choices'], shape=(self.shapes['batch_size'],)) + + def __call__(self): + dummy = {} + dummy['input_ids'] = self.input_ids().reshape(self.shapes['batch_size'], 1, 
self.shapes['sequence_length']).repeat(1, self.shapes['num_choices'], 1) + dummy['attention_mask'] = self.attention_mask().reshape(self.shapes['batch_size'], 1, self.shapes['sequence_length']).repeat(1, self.shapes['num_choices'], 1) + if self.requires_token_type_ids(): + dummy['token_type_ids'] = self.token_type_ids().reshape(self.shapes['batch_size'], 1, self.shapes['sequence_length']).repeat(1, self.shapes['num_choices'], 1) + if self.with_labels: + dummy['label'] = self.labels() + return dummy + +class ImageClassificationGenerator(ImageGenerator): + + def labels(self): + return self.generate_random_integers(min_value=0, max_value=self.shapes['num_labels'] or DEFAULT_NUM_LABELS, shape=(self.shapes['batch_size'],)) + + def __call__(self): + dummy = {} + dummy['pixel_values'] = self.pixel_values() + if self.with_labels: + dummy['labels'] = self.labels() + return dummy + +class ObjectDetectionGenerator(ImageGenerator): + + def labels(self): + return [{'class_labels': self.generate_random_integers(min_value=0, max_value=self.shapes['num_labels'] or DEFAULT_NUM_LABELS, shape=(self.shapes['num_queries'],)), 'boxes': self.generate_random_floats(min_value=-1, max_value=1, shape=(self.shapes['num_queries'], 4))} for _ in range(self.shapes['batch_size'])] + + def __call__(self): + dummy = {} + dummy['pixel_values'] = self.pixel_values() + if self.with_labels: + dummy['labels'] = self.labels() + return dummy + +class SemanticSegmentationGenerator(ImageGenerator): + + def labels(self): + return self.generate_random_integers(min_value=0, max_value=self.shapes['num_labels'] or DEFAULT_NUM_LABELS, shape=(self.shapes['batch_size'], self.shapes['height'], self.shapes['width'])) + + def __call__(self): + dummy = {} + dummy['pixel_values'] = self.pixel_values() + if self.with_labels: + dummy['labels'] = self.labels() + return dummy + +class AudioClassificationGenerator(AudioGenerator): + + def labels(self): + return self.generate_random_integers(min_value=0, max_value=self.shapes['num_labels'] or DEFAULT_NUM_LABELS, shape=(self.shapes['batch_size'],)) + + def __call__(self): + dummy = {} + dummy['input_values'] = self.input_values() + if self.with_labels: + dummy['labels'] = self.labels() + return dummy + +class AutomaticSpeechRecognitionGenerator(AudioGenerator): + + def labels(self): + return self.generate_random_integers(min_value=0, max_value=self.shapes['vocab_size'] or DEFAULT_TYPE_VOCAB_SIZE, shape=(self.shapes['batch_size'], self.shapes['sequence_length'])) + + def __call__(self): + dummy = {} + dummy['input_values'] = self.input_values() + if self.with_labels: + dummy['labels'] = self.labels() + return dummy + +class PromptGenerator(TaskGenerator): + + def prompt(self): + return self.generate_random_strings(num_seq=self.shapes['batch_size']) + + def __call__(self): + dummy = {} + dummy['prompt'] = self.prompt() + return dummy + +class FeatureExtractionGenerator(TextGenerator, ImageGenerator): + + def __call__(self): + dummy = {} + if self.shapes.get('num_channels', None) is not None and self.shapes.get('height', None) is not None: + dummy['pixel_values'] = self.pixel_values() + else: + dummy['input_ids'] = self.input_ids() + dummy['attention_mask'] = self.attention_mask() + if self.requires_token_type_ids(): + dummy['token_type_ids'] = self.token_type_ids() + if self.requires_position_ids(): + dummy['position_ids'] = self.position_ids() + return dummy +TASKS_TO_GENERATORS = {'feature-extraction': FeatureExtractionGenerator, 'text-classification': TextClassificationGenerator, 
'token-classification': TokenClassificationGenerator, 'text-generation': TextGenerationGenerator, 'text2text-generation': Text2TextGenerationGenerator, 'question-answering': QuestionAnsweringGenerator, 'fill-mask': MaskedLanguageModelingGenerator, 'multiple-choice': MultipleChoiceGenerator, 'image-classification': ImageClassificationGenerator, 'object-detection': ObjectDetectionGenerator, 'semantic-segmentation': SemanticSegmentationGenerator, 'text-to-image': PromptGenerator, 'stable-diffusion': PromptGenerator, 'stable-diffusion-xl': PromptGenerator} + +# File: optimum-benchmark-main/optimum_benchmark/hub_utils.py +import os +import tempfile +import time +from dataclasses import asdict, dataclass +from json import dump, load +from logging import getLogger +from typing import Any, Dict, Optional +import pandas as pd +from flatten_dict import flatten, unflatten +from huggingface_hub import create_repo, hf_hub_download, upload_file +from huggingface_hub.utils._errors import HfHubHTTPError +from typing_extensions import Self +LOGGER = getLogger('hub_utils') + +class classproperty: + + def __init__(self, fget): + self.fget = fget + + def __get__(self, obj, owner): + return self.fget(owner) + +@dataclass +class PushToHubMixin: + + def to_dict(self, flat=False) -> Dict[str, Any]: + data = asdict(self) + if flat: + data = flatten(data, reducer='dot') + return data + + @classmethod + def from_dict(cls, data: Dict[str, Any]) -> 'PushToHubMixin': + return cls(**data) + + def save_json(self, path: str, flat: bool=False) -> None: + with open(path, 'w') as f: + dump(self.to_dict(flat=flat), f, indent=4) + + @classmethod + def from_json(cls, path: str) -> Self: + with open(path, 'r') as f: + data = load(f) + return cls.from_dict(data) + + def to_dataframe(self) -> pd.DataFrame: + flat_dict_data = self.to_dict(flat=True) + return pd.DataFrame.from_dict(flat_dict_data, orient='index').T + + @classmethod + def from_dataframe(cls, df: pd.DataFrame) -> Self: + data = df.to_dict(orient='records')[0] + for (k, v) in data.items(): + if isinstance(v, str) and v.startswith('[') and v.endswith(']'): + data[k] = eval(v) + if v != v: + data[k] = None + data = unflatten(data, splitter='dot') + return cls.from_dict(data) + + def save_csv(self, path: str) -> None: + self.to_dataframe().to_csv(path, index=False) + + @classmethod + def from_csv(cls, path: str) -> Self: + return cls.from_dataframe(pd.read_csv(path)) + + def push_to_hub(self, repo_id: str, filename: Optional[str]=None, subfolder: Optional[str]=None, **kwargs) -> None: + filename = str(filename or self.default_filename) + subfolder = str(subfolder or self.default_subfolder) + token = kwargs.pop('token', None) + private = kwargs.pop('private', False) + exist_ok = kwargs.pop('exist_ok', True) + repo_type = kwargs.pop('repo_type', 'dataset') + create_repo(repo_id, token=token, private=private, exist_ok=exist_ok, repo_type=repo_type) + with tempfile.TemporaryDirectory() as tmpdir: + path_or_fileobj = os.path.join(tmpdir, filename) + path_in_repo = os.path.join(subfolder, filename) + self.save_json(path_or_fileobj) + try: + upload_file(repo_id=repo_id, path_in_repo=path_in_repo, path_or_fileobj=path_or_fileobj, repo_type=repo_type, token=token, **kwargs) + except HfHubHTTPError as e: + LOGGER.warn('Error while uploading to Hugging Face Hub') + if 'Client Error: Too Many Requests for url' in str(e): + LOGGER.warn('Client Error: Too Many Requests for url. 
Retrying in 15 seconds.') + time.sleep(15) + upload_file(repo_id=repo_id, path_in_repo=path_in_repo, path_or_fileobj=path_or_fileobj, repo_type=repo_type, token=token, **kwargs) + else: + raise e + + @classmethod + def from_pretrained(cls, repo_id: str, filename: Optional[str]=None, subfolder: Optional[str]=None, **kwargs) -> Self: + filename = str(filename or cls.default_filename) + subfolder = str(subfolder or cls.default_subfolder) + repo_type = kwargs.pop('repo_type', 'dataset') + try: + resolved_file = hf_hub_download(repo_id=repo_id, filename=filename, subfolder=subfolder, repo_type=repo_type, **kwargs) + except HfHubHTTPError as e: + LOGGER.warn('Error while downloading from Hugging Face Hub') + if 'Client Error: Too Many Requests for url' in str(e): + LOGGER.warn('Client Error: Too Many Requests for url. Retrying in 15 seconds.') + time.sleep(15) + resolved_file = hf_hub_download(repo_id=repo_id, filename=filename, subfolder=subfolder, repo_type=repo_type, **kwargs) + else: + raise e + config_dict = cls.from_json(resolved_file) + return config_dict + + @classproperty + def default_filename(self) -> str: + return 'file.json' + + @classproperty + def default_subfolder(self) -> str: + return 'benchmarks' + +# File: optimum-benchmark-main/optimum_benchmark/import_utils.py +import importlib.metadata +import importlib.util +from pathlib import Path +from subprocess import STDOUT, check_output +from typing import Optional +_transformers_available = importlib.util.find_spec('transformers') is not None +_accelerate_available = importlib.util.find_spec('accelerate') is not None +_diffusers_available = importlib.util.find_spec('diffusers') is not None +_optimum_available = importlib.util.find_spec('optimum') is not None +_torch_available = importlib.util.find_spec('torch') is not None +_onnx_available = importlib.util.find_spec('onnx') is not None +_tensorrt_available = importlib.util.find_spec('tensorrt') is not None +_peft_available = importlib.util.find_spec('peft') is not None +_pynvml_available = importlib.util.find_spec('pynvml') is not None +_torch_distributed_available = importlib.util.find_spec('torch.distributed') is not None +_onnxruntime_available = importlib.util.find_spec('onnxruntime') is not None +_ipex_available = importlib.util.find_spec('intel_extension_for_pytorch') is not None +_openvino_available = importlib.util.find_spec('openvino') is not None +_neural_compressor_available = importlib.util.find_spec('neural_compressor') is not None +_codecarbon_available = importlib.util.find_spec('codecarbon') is not None +_amdsmi_available = importlib.util.find_spec('amdsmi') is not None +_tensorflow_available = importlib.util.find_spec('tensorflow') is not None +_timm_available = importlib.util.find_spec('timm') is not None +_diffusers_available = importlib.util.find_spec('diffusers') is not None +_torch_ort_available = importlib.util.find_spec('torch_ort') is not None +_deepspeed_available = importlib.util.find_spec('deepspeed') is not None +_tensorrt_llm_available = importlib.util.find_spec('tensorrt_llm') is not None +_psutil_available = importlib.util.find_spec('psutil') is not None +_optimum_benchmark_available = importlib.util.find_spec('optimum_benchmark') is not None +_py_txi_available = importlib.util.find_spec('py_txi') is not None +_pyrsmi_available = importlib.util.find_spec('pyrsmi') is not None +_llm_swarm_available = importlib.util.find_spec('llm_swarm') is not None +_zentorch_available = importlib.util.find_spec('zentorch') is not None +_vllm_available = 
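# [Illustrative sketch, not part of the original file] The availability flags in import_utils.py
# all follow the same pattern: probe for the package once at import time with
# importlib.util.find_spec and expose the result through an is_*_available() helper. Standalone
# version of the pattern; the module names below are arbitrary examples:
import importlib.util

def is_available(module_name: str) -> bool:
    return importlib.util.find_spec(module_name) is not None

print(is_available("json"))                      # True: stdlib module
print(is_available("some_missing_package_xyz"))  # False: find_spec returns None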
importlib.util.find_spec('vllm') is not None +_llama_cpp_available = importlib.util.find_spec('llama-cpp-python') is not None + +def is_vllm_available(): + return _vllm_available + +def is_llama_cpp_available(): + return _llama_cpp_available + +def is_zentorch_available(): + return _zentorch_available + +def is_llm_swarm_available(): + return _llm_swarm_available + +def is_pyrsmi_available(): + return _pyrsmi_available + +def is_py_txi_available(): + return _py_txi_available + +def is_psutil_available(): + return _psutil_available + +def is_transformers_available(): + return _transformers_available + +def is_tensorrt_llm_available(): + return _tensorrt_llm_available + +def is_deepspeed_available(): + return _deepspeed_available + +def is_torch_ort_available(): + return _torch_ort_available + +def is_accelerate_available(): + return _accelerate_available + +def is_diffusers_available(): + return _diffusers_available + +def is_timm_available(): + return _timm_available + +def is_tensorflow_available(): + return _tensorflow_available + +def is_tensorrt_available(): + return _tensorrt_available + +def is_peft_available(): + return _peft_available + +def is_onnx_available(): + return _onnx_available + +def is_optimum_available(): + return _optimum_available + +def is_onnxruntime_available(): + return _onnxruntime_available + +def is_pynvml_available(): + return _pynvml_available + +def is_amdsmi_available(): + return _amdsmi_available + +def is_torch_available(): + return _torch_available + +def is_torch_distributed_available(): + return _torch_distributed_available + +def is_codecarbon_available(): + return _codecarbon_available + +def torch_version(): + if is_torch_available(): + return importlib.metadata.version('torch') + +def tesnorrt_version(): + if is_tensorrt_available(): + return importlib.metadata.version('tensorrt') + +def onnxruntime_version(): + try: + return 'ort:' + importlib.metadata.version('onnxruntime') + except importlib.metadata.PackageNotFoundError: + try: + return 'ort-gpu:' + importlib.metadata.version('onnxruntime-gpu') + except importlib.metadata.PackageNotFoundError: + try: + return 'ort-training:' + importlib.metadata.version('onnxruntime-training') + except importlib.metadata.PackageNotFoundError: + return None + +def openvino_version(): + if _openvino_available: + return importlib.metadata.version('openvino') + +def ipex_version(): + if _ipex_available: + return importlib.metadata.version('intel_extension_for_pytorch') + +def neural_compressor_version(): + if _neural_compressor_available: + return importlib.metadata.version('neural_compressor') + +def optimum_version(): + if _optimum_available: + return importlib.metadata.version('optimum') + +def transformers_version(): + if _transformers_available: + return importlib.metadata.version('transformers') + +def accelerate_version(): + if _accelerate_available: + return importlib.metadata.version('accelerate') + +def diffusers_version(): + if _diffusers_available: + return importlib.metadata.version('diffusers') + +def torch_ort_version(): + if _torch_ort_available: + return importlib.metadata.version('torch_ort') + +def timm_version(): + if _timm_available: + return importlib.metadata.version('timm') + +def peft_version(): + if _peft_available: + return importlib.metadata.version('peft') + +def tesnorrt_llm_version(): + if _tensorrt_llm_available: + return importlib.metadata.version('tensorrt_llm') + +def optimum_benchmark_version(): + if _optimum_benchmark_available: + return 
importlib.metadata.version('optimum_benchmark') + +def py_txi_version(): + if _py_txi_available: + return importlib.metadata.version('py_txi') + +def llm_swarm_version(): + if _llm_swarm_available: + return importlib.metadata.version('llm_swarm') + +def vllm_version(): + if _vllm_available: + return importlib.metadata.version('vllm') + +def llama_cpp_version(): + if _llama_cpp_available: + return importlib.metadata.version('llama_cpp') + +def get_git_revision_hash(package_name: str) -> Optional[str]: + try: + path = Path(importlib.util.find_spec(package_name).origin).parent + except Exception: + return None + try: + git_hash = check_output(['git', 'rev-parse', 'HEAD'], cwd=path, stderr=STDOUT).strip().decode('utf-8') + except Exception: + return None + return git_hash + +def get_hf_libs_info(): + return {'optimum_benchmark_version': optimum_benchmark_version(), 'optimum_benchmark_commit': get_git_revision_hash('optimum_benchmark'), 'transformers_version': transformers_version() if is_transformers_available() else None, 'transformers_commit': get_git_revision_hash('transformers'), 'accelerate_version': accelerate_version() if is_accelerate_available else None, 'accelerate_commit': get_git_revision_hash('accelerate'), 'diffusers_version': diffusers_version() if is_diffusers_available() else None, 'diffusers_commit': get_git_revision_hash('diffusers'), 'optimum_version': optimum_version() if is_optimum_available() else None, 'optimum_commit': get_git_revision_hash('optimum'), 'timm_version': timm_version() if is_timm_available() else None, 'timm_commit': get_git_revision_hash('timm'), 'peft_version': peft_version() if is_peft_available() else None, 'peft_commit': get_git_revision_hash('peft')} + +# File: optimum-benchmark-main/optimum_benchmark/launchers/__init__.py +from .config import LauncherConfig +from .inline.config import InlineConfig +from .process.config import ProcessConfig +from .torchrun.config import TorchrunConfig +__all__ = ['InlineConfig', 'ProcessConfig', 'TorchrunConfig', 'LauncherConfig'] + +# File: optimum-benchmark-main/optimum_benchmark/launchers/base.py +import os +import shutil +import sys +import tempfile +from abc import ABC +from contextlib import contextmanager +from logging import getLogger +from multiprocessing import Process, set_executable +from typing import Any, Callable, ClassVar, Generic, List, Optional +from ..benchmark.report import BenchmarkReport +from ..system_utils import is_nvidia_system, is_rocm_system +from .config import LauncherConfigT +from .device_isolation_utils import assert_device_isolation +NUMA_EXECUTABLE_CONTENT = '#!/bin/bash\necho "Running with numactl wrapper"\necho "numactl path: {numactl_path}"\necho "numactl args: {numactl_args}"\necho "python path: {python_path}"\necho "python args: $@"\n{numactl_path} {numactl_args} {python_path} "$@"\n' + +class Launcher(Generic[LauncherConfigT], ABC): + NAME: ClassVar[str] + config: LauncherConfigT + + def __init__(self, config: LauncherConfigT): + self.config = config + self.logger = getLogger(self.NAME) + self.logger.info(f'Allocated {self.NAME} launcher') + + def launch(self, worker: Callable[..., BenchmarkReport], worker_args: List[Any]) -> BenchmarkReport: + raise NotImplementedError('Launcher must implement launch method') + + @contextmanager + def device_isolation(self, pid: int, device_ids: Optional[str]=None): + if device_ids is None: + if is_rocm_system(): + device_ids = os.environ.get('ROCR_VISIBLE_DEVICES', None) + elif is_nvidia_system(): + device_ids = 
os.environ.get('CUDA_VISIBLE_DEVICES', None) + self.device_isolation_process = Process(target=assert_device_isolation, kwargs={'action': self.config.device_isolation_action, 'device_ids': device_ids, 'pid': pid}, daemon=True) + self.device_isolation_process.start() + self.logger.info(f'\t+ Isolating device(s) [{device_ids}] for process [{pid}] and its children') + self.logger.info(f'\t+ Executing action [{self.config.device_isolation_action}] in case of violation') + yield + self.logger.info('\t+ Stopping device isolation process') + self.device_isolation_process.terminate() + self.device_isolation_process.join() + self.device_isolation_process.close() + + @contextmanager + def numactl_executable(self): + self.logger.info('\t+ Creating numactl wrapper executable for multiprocessing') + python_path = sys.executable + numactl_path = shutil.which('numactl') + if numactl_path is None: + raise RuntimeError('ِCould not find numactl executable. Please install numactl and try again.') + numactl_args = ' '.join([f'--{key}={value}' for (key, value) in self.config.numactl_kwargs.items()]) + numa_executable = tempfile.NamedTemporaryFile(delete=False, prefix='numa_executable_', suffix='.sh') + numa_executable_content = NUMA_EXECUTABLE_CONTENT.format(numactl_path=numactl_path, numactl_args=numactl_args, python_path=python_path) + numa_executable.write(numa_executable_content.encode()) + os.chmod(numa_executable.name, 511) + numa_executable.close() + self.logger.info('\t+ Setting multiprocessing executable to numactl wrapper') + set_executable(numa_executable.name) + yield + self.logger.info('\t+ Resetting default multiprocessing executable') + os.unlink(numa_executable.name) + set_executable(sys.executable) + +# File: optimum-benchmark-main/optimum_benchmark/launchers/config.py +from abc import ABC +from dataclasses import dataclass, field +from logging import getLogger +from typing import Any, Dict, Optional, TypeVar +from ..system_utils import is_nvidia_system, is_rocm_system +LOGGER = getLogger('launcher') + +@dataclass +class LauncherConfig(ABC): + name: str + _target_: str + device_isolation: bool = False + device_isolation_action: Optional[str] = None + numactl: bool = False + numactl_kwargs: Dict[str, Any] = field(default_factory=dict) + + def __post_init__(self): + if self.device_isolation and (not is_nvidia_system()) and (not is_rocm_system()): + raise ValueError('Device isolation is only supported on NVIDIA and ROCm systems. Please set `device_isolation` to False or make sure your drivers are correctly installed by running `nvidia-smi` or `rocm-smi`.') + if self.device_isolation and self.device_isolation_action is None: + LOGGER.warning('Device isolation is enabled but no action is specified. Please set `device_isolation_action` to either `error`, `warn`, or `kill`. Defaulting to `warn`.') + self.device_isolation_action = 'warn' + elif self.device_isolation and self.device_isolation_action not in {'error', 'warn', 'kill'}: + raise ValueError(f'Unsupported device isolation action {self.device_isolation_action}. 
Please set `device_isolation_action` to either `error`, `warn`, or `kill`.') +LauncherConfigT = TypeVar('LauncherConfigT', bound=LauncherConfig) + +# File: optimum-benchmark-main/optimum_benchmark/launchers/device_isolation_utils.py +import os +import signal +import sys +import time +from logging import getLogger +from typing import Set +from ..import_utils import is_amdsmi_available, is_psutil_available, is_pynvml_available +from ..logging_utils import setup_logging +from ..system_utils import is_nvidia_system, is_rocm_system +if is_psutil_available(): + import psutil +if is_pynvml_available(): + import pynvml +if is_amdsmi_available(): + import amdsmi +LOGGER = getLogger('device-isolation') + +class DeviceIsolationError(Exception): + pass + +def isolation_error_signal_handler(signum, frame): + raise DeviceIsolationError('Received an error signal from the device isolation process') +if sys.platform == 'linux': + signal.signal(signal.SIGUSR1, isolation_error_signal_handler) + +def get_nvidia_devices_pids(device_ids: str) -> Set[int]: + if not is_pynvml_available(): + raise ValueError('The library pynvml is required to get the pids running on NVIDIA GPUs, but is not installed. Please install the official and NVIDIA maintained PyNVML library through `pip install nvidia-ml-py`.') + pynvml.nvmlInit() + devices_pids = set() + devices_ids = list(map(int, device_ids.split(','))) + for device_id in devices_ids: + device_handle = pynvml.nvmlDeviceGetHandleByIndex(device_id) + device_processes = pynvml.nvmlDeviceGetComputeRunningProcesses(device_handle) + for device_process in device_processes: + devices_pids.add(device_process.pid) + pynvml.nvmlShutdown() + return devices_pids + +def get_amd_devices_pids(device_ids: str) -> Set[int]: + if not is_amdsmi_available(): + raise ValueError('The library amdsmi is required to get the pids running on AMD GPUs, but is not installed. Please install the official and AMD maintained amdsmi library from https://github.com/ROCm/amdsmi.') + amdsmi.amdsmi_init() + permission_denied = False + devices_pids = set() + devices_ids = list(map(int, device_ids.split(','))) + processor_handles = amdsmi.amdsmi_get_processor_handles() + for device_id in devices_ids: + processor_handle = processor_handles[device_id] + if permission_denied: + continue + try: + processes_handles = amdsmi.amdsmi_get_gpu_process_list(processor_handle) + except Exception as e: + permission_denied = 'Permission denied' in str(e) + continue + for process_handle in processes_handles: + try: + info = amdsmi.amdsmi_get_gpu_process_info(processor_handle, process_handle) + except Exception as e: + permission_denied = 'Permission denied' in str(e) + continue + if info['memory_usage']['vram_mem'] == 4096: + continue + devices_pids.add(info['pid']) + amdsmi.amdsmi_shut_down() + return devices_pids + +def get_pids_running_on_system_devices(device_ids: str) -> Set[int]: + if is_nvidia_system(): + devices_pids = get_nvidia_devices_pids(device_ids) + elif is_rocm_system(): + devices_pids = get_amd_devices_pids(device_ids) + else: + raise ValueError('get_pids_running_on_system_device is only supported on NVIDIA and AMD GPUs') + return devices_pids + +def get_children_pids(pid: int) -> Set[int]: + if not is_psutil_available(): + raise ValueError('The library psutil is required to get the children pids of a process, but is not installed. 
Please install the official and cross-platform psutil library through `pip install psutil`.') + if not psutil.pid_exists(pid): + LOGGER.warn(f'Process with pid [{pid}] does not exist.') + return set() + process = psutil.Process(pid) + children = process.children(recursive=True) + children_pids = {child.pid for child in children} + return children_pids + +def assert_device_isolation(pid: int, device_ids: str, action: str): + log_level = os.environ.get('LOG_LEVEL', 'INFO') + log_to_file = os.environ.get('LOG_TO_FILE', '1') == '1' + setup_logging(log_level, to_file=log_to_file, prefix='DEVICE-ISOLATION-PROCESS') + device_isolation_pid = os.getpid() + permitted_parent_pids = {pid, device_isolation_pid} + while any((psutil.pid_exists(p) for p in permitted_parent_pids)): + device_pids = get_pids_running_on_system_devices(device_ids=device_ids) + device_pids = {p for p in device_pids if psutil.pid_exists(p)} + permitted_children_pids = set() + for pid in permitted_parent_pids: + permitted_children_pids |= get_children_pids(pid) + permitted_pids = permitted_parent_pids | permitted_children_pids + permitted_pids = {p for p in permitted_pids if psutil.pid_exists(p)} + non_permitted_pids = device_pids - permitted_pids + if len(non_permitted_pids) > 0: + LOGGER.warn(f'Found process(es) [{non_permitted_pids}] running on device(s) [{device_ids}], other than the isolated process [{pid}], the device isolation process [{device_isolation_pid}] and their children [{permitted_children_pids}].') + if action == 'warn': + LOGGER.warn('Make sure no other process is running on the device(s) while benchmarking.') + elif action == 'error': + LOGGER.error('Signaling the isolated process to error out.') + if sys.platform == 'linux': + os.kill(pid, signal.SIGUSR1) + else: + LOGGER.error('Sending an error signal is only supported on Linux. Killing the isolated process.') + os.kill(pid, signal.SIGKILL) + elif action == 'kill': + LOGGER.error('Killing the isolated process.') + os.kill(pid, signal.SIGKILL) + LOGGER.warn('Exiting device isolation process.') + exit(0) + time.sleep(1) + +# File: optimum-benchmark-main/optimum_benchmark/launchers/inline/config.py +from dataclasses import dataclass +from ..config import LauncherConfig + +@dataclass +class InlineConfig(LauncherConfig): + name: str = 'inline' + _target_: str = 'optimum_benchmark.launchers.inline.launcher.InlineLauncher' + + def __post_init__(self): + super().__post_init__() + if self.device_isolation: + raise ValueError('Device isolation is not supported with the inline launcher. Use `process` launcher instead.') + if self.device_isolation_action is not None: + raise ValueError('Device isolation is not supported with the inline launcher. 
Use `process` launcher instead.') + +# File: optimum-benchmark-main/optimum_benchmark/launchers/inline/launcher.py +from typing import Any, Callable, List +from ...benchmark.report import BenchmarkReport +from ..base import Launcher +from .config import InlineConfig + +class InlineLauncher(Launcher[InlineConfig]): + NAME = 'inline' + + def __init__(self, config: InlineConfig): + super().__init__(config) + + def launch(self, worker: Callable[..., BenchmarkReport], worker_args: List[Any]) -> BenchmarkReport: + self.logger.warn('The inline launcher is only recommended for debugging purposes and not for benchmarking') + report = worker(*worker_args) + return report + +# File: optimum-benchmark-main/optimum_benchmark/launchers/process/config.py +from dataclasses import dataclass +from ..config import LauncherConfig + +@dataclass +class ProcessConfig(LauncherConfig): + name: str = 'process' + _target_: str = 'optimum_benchmark.launchers.process.launcher.ProcessLauncher' + start_method: str = 'spawn' + + def __post_init__(self): + super().__post_init__() + if self.start_method not in ['spawn', 'fork']: + raise ValueError(f"start_method must be one of ['spawn', 'fork'], got {self.start_method}") + +# File: optimum-benchmark-main/optimum_benchmark/launchers/process/launcher.py +import os +import traceback +from contextlib import ExitStack +from logging import Logger +from multiprocessing import Pipe, Process, get_start_method, set_start_method +from multiprocessing.connection import Connection +from typing import Any, Callable, List +from ...benchmark.report import BenchmarkReport +from ...logging_utils import setup_logging +from ..base import Launcher +from .config import ProcessConfig + +class ProcessLauncher(Launcher[ProcessConfig]): + NAME = 'process' + + def __init__(self, config: ProcessConfig): + super().__init__(config) + if get_start_method(allow_none=True) != self.config.start_method: + self.logger.info(f'\t+ Setting multiprocessing start method to {self.config.start_method}') + set_start_method(self.config.start_method, force=True) + self.logger.info('\t+ Warming up multiprocessing context') + dummy_process = Process(target=dummy_target, daemon=False) + dummy_process.start() + dummy_process.join() + dummy_process.close() + + def launch(self, worker: Callable[..., BenchmarkReport], worker_args: List[Any]) -> BenchmarkReport: + (child_connection, parent_connection) = Pipe() + isolated_process = Process(target=target, args=(worker, worker_args, child_connection, self.logger), daemon=False) + with ExitStack() as stack: + if self.config.numactl: + stack.enter_context(self.numactl_executable()) + self.logger.info('\t+ Starting isolated process') + isolated_process.start() + while True: + if parent_connection.poll(): + message = parent_connection.recv() + if message == 'READY': + self.logger.info('\t+ Isolated process is ready') + break + else: + raise RuntimeError(f'Unexpected message from isolated process: {message}') + with ExitStack() as stack: + if self.config.device_isolation: + stack.enter_context(self.device_isolation(isolated_process.pid)) + parent_connection.send('START') + isolated_process.join() + if isolated_process.exitcode != 0: + raise RuntimeError(f'Isolated process exited with non-zero code {isolated_process.exitcode}') + if parent_connection.poll(): + response = parent_connection.recv() + if 'traceback' in response: + self.logger.error('\t+ Received traceback from isolated process') + raise ChildProcessError(response['traceback']) + elif 'exception' in response: + 
self.logger.error('\t+ Received exception from isolated process') + raise ChildProcessError(response['exception']) + elif 'report' in response: + self.logger.info('\t+ Received report from isolated process') + report = BenchmarkReport.from_dict(response['report']) + report.log() + else: + raise RuntimeError(f'Received an unexpected response from isolated process: {response}') + return report + +def target(worker: Callable[..., BenchmarkReport], worker_args: List[Any], connection: Connection, logger: Logger) -> None: + log_level = os.environ.get('LOG_LEVEL', 'INFO') + log_to_file = os.environ.get('LOG_TO_FILE', '1') == '1' + setup_logging(level=log_level, to_file=log_to_file, prefix='ISOLATED-PROCESS') + connection.send('READY') + while True: + if connection.poll(): + message = connection.recv() + if message == 'START': + logger.info('\t+ Starting benchmark in isolated process') + break + else: + raise RuntimeError(f'Unexpected message from main process: {message}') + try: + report = worker(*worker_args) + except Exception: + logger.error('\t+ Sending traceback to main process') + connection.send({'traceback': traceback.format_exc()}) + else: + logger.info('\t+ Sending report to main process') + connection.send({'report': report.to_dict()}) + finally: + logger.info('\t+ Exiting isolated process') + connection.close() + exit(0) + +def dummy_target() -> None: + exit(0) + +# File: optimum-benchmark-main/optimum_benchmark/launchers/torchrun/config.py +import uuid +from dataclasses import dataclass, field +from typing import Any, Dict, Optional +from ..config import LauncherConfig + +@dataclass +class TorchrunConfig(LauncherConfig): + name: str = 'torchrun' + _target_: str = 'optimum_benchmark.launchers.torchrun.launcher.TorchrunLauncher' + min_nodes: int = 1 + max_nodes: int = 1 + nproc_per_node: int = 2 + role: str = 'benchmarker' + monitor_interval: int = 30 + rdzv_id: str = str(uuid.uuid4()) + rdzv_backend: str = 'c10d' + rdzv_endpoint: str = 'localhost:0' + rdzv_configs: Dict[str, Any] = field(default_factory=lambda : {'rank': 0, 'timeout': -1}) + rdzv_timeout: int = -1 + max_restarts: int = 0 + start_method: str = 'spawn' + local_addr: Optional[str] = None + socket_ifname: Optional[str] = None + + def __post_init__(self): + super().__post_init__() + if self.start_method not in ['spawn', 'fork']: + raise ValueError(f"start_method must be one of ['spawn', 'fork'], got {self.start_method}") + if self.min_nodes != self.max_nodes: + raise ValueError(f'min_nodes and max_nodes must be equal for a reproducible benchmark, got {self.min_nodes} and {self.max_nodes}') + +# File: optimum-benchmark-main/optimum_benchmark/launchers/torchrun/launcher.py +import os +import traceback +from contextlib import ExitStack +from logging import Logger +from multiprocessing import Pipe, Process, get_start_method, set_start_method +from multiprocessing.connection import Connection +from typing import Any, Callable, List +import torch.distributed +from torch.distributed.launcher.api import LaunchConfig, elastic_launch +from ...benchmark.report import BenchmarkReport +from ...logging_utils import setup_logging +from ..base import Launcher +from .config import TorchrunConfig + +class TorchrunLauncher(Launcher[TorchrunConfig]): + NAME = 'torchrun' + + def __init__(self, config: TorchrunConfig): + super().__init__(config) + if get_start_method(allow_none=True) != self.config.start_method: + self.logger.info(f'\t+ Setting multiprocessing start method to {self.config.start_method}') + 
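+ # NOTE (descriptive comment, added): `force=True` below overrides any previously set start method so the
+ # launcher and the processes it spawns all use the configured one ('spawn' or 'fork'); the short-lived
+ # dummy Process started just after only serves to warm up the multiprocessing context.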
set_start_method(self.config.start_method, force=True) + self.logger.info('\t+ Warming up multiprocessing context') + dummy_process = Process() + dummy_process.start() + dummy_process.join() + self.launch_config = LaunchConfig(min_nodes=self.config.min_nodes, max_nodes=self.config.max_nodes, nproc_per_node=self.config.nproc_per_node, run_id=self.config.rdzv_id, role=self.config.role, rdzv_endpoint=self.config.rdzv_endpoint, rdzv_backend=self.config.rdzv_backend, rdzv_configs=self.config.rdzv_configs, rdzv_timeout=self.config.rdzv_timeout, max_restarts=self.config.max_restarts, monitor_interval=self.config.monitor_interval, start_method=self.config.start_method, local_addr=self.config.local_addr) + + def launch(self, worker: Callable[..., BenchmarkReport], worker_args: List[Any]) -> BenchmarkReport: + (parent_connection, child_connection) = Pipe() + isolated_process = Process(target=target, args=(worker, worker_args, child_connection, self.launch_config, self.logger), daemon=False) + with ExitStack() as stack: + if self.config.numactl: + stack.enter_context(self.numactl_executable()) + self.logger.info('\t+ Starting isolated process') + isolated_process.start() + while True: + if parent_connection.poll(): + message = parent_connection.recv() + if message == 'READY': + self.logger.info('\t+ Isolated process is ready') + break + else: + raise RuntimeError(f'Unexpected message from isolated process: {message}') + with ExitStack() as stack: + if self.config.device_isolation: + stack.enter_context(self.device_isolation(isolated_process.pid)) + parent_connection.send('START') + isolated_process.join() + if isolated_process.exitcode != 0: + raise RuntimeError(f'Isolated process exited with non-zero code {isolated_process.exitcode}') + if parent_connection.poll(): + response = parent_connection.recv() + else: + raise RuntimeError('Isolated process did not send any response') + reports = [] + for output in response: + if 'traceback' in output: + if 'rank' in output: + self.logger.error(f"\t+ Received traceback from rank process [{output['rank']}]") + raise ChildProcessError(output['traceback']) + else: + self.logger.error('\t+ Received traceback from isolated process') + raise ChildProcessError(output['traceback']) + elif 'report' in output: + self.logger.info(f"\t+ Received report from rank process [{output['rank']}]") + reports.append(BenchmarkReport.from_dict(output['report'])) + else: + raise RuntimeError(f'Received an unexpected response from isolated process: {output}') + self.logger.info('\t+ Aggregating reports from all rank processes') + report = BenchmarkReport.aggregate(reports) + report.log() + return report + +def target(worker: Callable[..., BenchmarkReport], worker_args: List[Any], connection: Connection, config: LaunchConfig, logger: Logger): + log_level = os.environ.get('LOG_LEVEL', 'INFO') + log_to_file = os.environ.get('LOG_TO_FILE', '1') == '1' + setup_logging(level=log_level, to_file=log_to_file, prefix='ISOLATED-PROCESS') + connection.send('READY') + while True: + if connection.poll(): + message = connection.recv() + if message == 'START': + logger.info('\t+ Starting benchmark in isolated process') + break + else: + raise RuntimeError(f'Unexpected message from main process: {message}') + try: + elastic_agent_launcher = elastic_launch(config=config, entrypoint=entrypoint) + outputs = elastic_agent_launcher(worker, worker_args, logger) + except Exception: + logger.error('\t+ Sending traceback to main process') + connection.send([{'traceback': traceback.format_exc()}]) + else: + 
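+ # NOTE (hedged, added): elastic_launch returns a mapping, presumably keyed by local rank, of each rank's
+ # output (a report dict or a traceback dict); only its values are forwarded to the parent process below.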
logger.info('\t+ Sending outputs to main process') + connection.send(list(outputs.values())) + finally: + logger.info('\t+ Exiting isolated process') + connection.close() + exit(0) + +def entrypoint(worker: Callable[..., BenchmarkReport], worker_args: List[Any], logger: Logger): + rank = int(os.environ.get('RANK', '0')) + log_level = os.environ.get('LOG_LEVEL', 'INFO') + log_to_file = os.environ.get('LOG_TO_FILE', '1') == '1' + log_all_ranks = os.environ.get('LOG_ALL_RANKS', '0') == '1' + if log_all_ranks or rank == 0: + setup_logging(level=log_level, to_file=log_to_file, prefix=f'RANK-PROCESS-{rank}') + else: + setup_logging(level='ERROR', to_file=log_to_file, prefix=f'RANK-PROCESS-{rank}') + if torch.cuda.is_available(): + logger.info(f'\t+ Setting torch.distributed cuda device to {rank}') + device = torch.device('cuda', rank) + torch.cuda.set_device(device) + logger.info('\t+ Initializing torch.distributed process group') + torch.distributed.init_process_group() + try: + report = worker(*worker_args) + except Exception: + logger.error('\t+ Benchmark failed with an exception') + output = {'rank': rank, 'traceback': traceback.format_exc()} + else: + logger.info('\t+ Benchmark completed successfully') + output = {'rank': rank, 'report': report.to_dict()} + finally: + logger.info('\t+ Destroying torch.distributed process group') + torch.distributed.destroy_process_group() + logger.info('\t+ Exiting rank process') + return output + +# File: optimum-benchmark-main/optimum_benchmark/logging_utils.py +import logging +import logging.config +from subprocess import PIPE, STDOUT, Popen +from typing import List, Optional + +def setup_logging(level: str='INFO', to_file: bool=False, use_colorlog: bool=True, prefix: Optional[str]=None, disable_existing_loggers: bool=False): + logging_config = {'version': 1, 'handlers': {'console': {'formatter': 'simple', 'stream': 'ext://sys.stdout', 'class': 'logging.StreamHandler'}}, 'root': {'level': level, 'handlers': ['console']}, 'disable_existing_loggers': disable_existing_loggers} + logging_config['formatters'] = {'simple': {'format': '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'}} + if to_file: + logging_config['handlers']['file'] = {'formatter': 'simple', 'filename': 'benchmark.log', 'class': 'logging.FileHandler'} + logging_config['root']['handlers'].append('file') + if use_colorlog: + logging_config['formatters']['colorlog'] = {'()': 'colorlog.ColoredFormatter', 'format': '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - %(message)s', 'log_colors': {'DEBUG': 'purple', 'INFO': 'green', 'WARNING': 'yellow', 'CRITICAL': 'red', 'ERROR': 'red'}} + for handler in logging_config['handlers']: + logging_config['handlers'][handler]['formatter'] = 'colorlog' + if prefix is not None: + for formatter in logging_config['formatters']: + logging_config['formatters'][formatter]['format'] = f'[{prefix}]' + logging_config['formatters'][formatter]['format'] + logging.config.dictConfig(logging_config) + +def run_subprocess_and_log_stream_output(logger: logging.Logger, args: List[str]) -> Popen: + popen = Popen(args, stdout=PIPE, stderr=STDOUT) + for line in iter(popen.stdout.readline, b''): + if line is not None: + logger.info(line.decode('utf-8').rstrip()) + popen.wait() + return popen + +# File: optimum-benchmark-main/optimum_benchmark/profilers/fx_profiler.py +import time +from logging import getLogger +from typing import Any, List, Tuple +import torch +from torch.fx import Interpreter +from torch.fx.graph_module 
import GraphModule +from torch.fx.node import Node +LOGGER = getLogger('fx_profiler') + +class FXProfilingWrapper(Interpreter): + + def __init__(self, module: GraphModule): + super().__init__(module) + self.profiling_records: List[Tuple[str, str, float]] = [] + + def run(self, *args) -> Any: + return super().run(*args) + + def run_node(self, node: Node) -> Any: + if self.module.device.type == 'cuda': + start = torch.cuda.Event(enable_timing=True) + end = torch.cuda.Event(enable_timing=True) + start.record(stream=torch.cuda.current_stream()) + return_val = super().run_node(node) + end.record(stream=torch.cuda.current_stream()) + torch.cuda.synchronize() + node_runtime = start.elapsed_time(end) / 1000.0 + else: + start = time.perf_counter_ns() + return_val = super().run_node(node) + end = time.perf_counter_ns() + node_runtime = (end - start) / 1000000000.0 + LOGGER.debug(f'Node {node.name} took {node_runtime:.2e} seconds') + self.profiling_records.append((node.name, node.op, node_runtime)) + return return_val + + def __call__(self, **kwargs) -> Any: + args = kwargs.values() + return super().run(*args) + + def get_profiling_records(self) -> List[Tuple[str, str, float]]: + return self.profiling_records + +# File: optimum-benchmark-main/optimum_benchmark/profilers/ort_profiler.py +import json +from logging import getLogger +from typing import List, Tuple +import pandas as pd +from optimum.onnxruntime import ORTModel +LOGGER = getLogger('ort_profiler') + +class ORTProfilingWrapper: + + def __init__(self, module: ORTModel): + self.module = module + self.profiling_records: List[Tuple[str, str, float]] = [] + + def __call__(self, *args, **kwargs): + return self.module(*args, **kwargs) + + def get_profiling_records(self) -> List[Tuple[str, str, float]]: + profiling_json = self.module.model.end_profiling() + with open(profiling_json) as file_obj: + profiling_data = json.load(file_obj) + if isinstance(profiling_data, dict): + profiling_data = profiling_data['traceEvents'] + profiling_records = extract_last_run_records(profiling_data) + return normalize_records(profiling_records) + +def normalize_records(data) -> List[Tuple[str, str, float]]: + records = [] + for item in data: + cat = item.get('cat') + if cat is None: + continue + dur = item.get('dur') + if dur is None: + continue + arg = item.get('args') + if arg is None: + continue + op_name = arg.get('op_name') + name = item['name'] + if cat != 'Kernel' and (not name.endswith('kernel_time')): + continue + if cat in ['Kernel', 'Node']: + LOGGER.debug(f'Kernel/Node {name} took {dur / 1000000.0:.2e} seconds') + records.append((name.replace('_kernel_time', ''), op_name, dur / 1000000.0)) + return records + +def extract_last_run_records(data): + return pd.DataFrame(data)[['name', 'cat', 'dur', 'args']].groupby('name').last().reset_index().to_dict(orient='records') + +# File: optimum-benchmark-main/optimum_benchmark/scenarios/__init__.py +from .config import ScenarioConfig +from .energy_star.config import EnergyStarConfig +from .inference.config import InferenceConfig +from .training.config import TrainingConfig +__all__ = ['EnergyStarConfig', 'InferenceConfig', 'TrainingConfig', 'ScenarioConfig'] + +# File: optimum-benchmark-main/optimum_benchmark/scenarios/base.py +from abc import ABC +from logging import getLogger +from typing import ClassVar, Generic +from ..backends.base import Backend +from ..benchmark.report import BenchmarkReport +from .config import ScenarioConfigT + +class Scenario(Generic[ScenarioConfigT], ABC): + NAME: ClassVar[str] + + def 
__init__(self, config: ScenarioConfigT) -> None: + self.config = config + self.logger = getLogger(self.NAME) + self.logger.info(f'Allocating {self.NAME} scenario') + + def run(self, backend: Backend) -> BenchmarkReport: + raise NotImplementedError('Scenario must implement run method') + +# File: optimum-benchmark-main/optimum_benchmark/scenarios/config.py +from abc import ABC +from dataclasses import dataclass +from logging import getLogger +from typing import TypeVar +LOGGER = getLogger('benchmark') + +@dataclass +class ScenarioConfig(ABC): + name: str + _target_: str + + def __post_init__(self): + pass +ScenarioConfigT = TypeVar('ScenarioConfigT', bound=ScenarioConfig) + +# File: optimum-benchmark-main/optimum_benchmark/scenarios/energy_star/config.py +from dataclasses import dataclass, field +from logging import getLogger +from typing import Any, Dict, Union +from ...system_utils import is_rocm_system +from ..config import ScenarioConfig +LOGGER = getLogger('energy_star') +INPUT_SHAPES = {'batch_size': 1} + +@dataclass +class EnergyStarConfig(ScenarioConfig): + name: str = 'energy_star' + _target_: str = 'optimum_benchmark.scenarios.energy_star.scenario.EnergyStarScenario' + dataset_name: str = field(default='', metadata={'help': 'Name of the dataset on the HF Hub.'}) + dataset_config: str = field(default='', metadata={'help': 'Name of the config of the dataset.'}) + dataset_split: str = field(default='train', metadata={'help': 'Dataset split to use.'}) + num_samples: int = field(default=-1, metadata={'help': 'Number of samples to select in the dataset. -1 means all.'}) + input_shapes: Dict[str, Any] = field(default_factory=dict, metadata={'help': 'Input shapes for the model. Missing keys will be filled with default values.'}) + text_column_name: str = field(default='text', metadata={'help': 'Name of the column with the text input.'}) + truncation: Union[bool, str] = field(default=False, metadata={'help': 'To truncate the inputs.'}) + max_length: int = field(default=-1, metadata={'help': 'Maximum length to use by one of the truncation/padding parameters'}) + warmup_runs: int = field(default=10, metadata={'help': 'Number of warmup runs to perform before scenarioing'}) + energy: bool = field(default=True, metadata={'help': 'Measure energy usage'}) + forward_kwargs: Dict[str, Any] = field(default_factory=dict, metadata={'help': 'Keyword arguments to pass to the forward method of the model.'}) + generate_kwargs: Dict[str, Any] = field(default_factory=dict, metadata={'help': 'Keyword arguments to pass to the generate method of the model.'}) + call_kwargs: Dict[str, Any] = field(default_factory=dict, metadata={'help': 'Keyword arguments to pass to the __call__ method of the pipeline.'}) + + def __post_init__(self): + super().__post_init__() + self.input_shapes = {**INPUT_SHAPES, **self.input_shapes} + if 'max_new_tokens' in self.generate_kwargs and 'min_new_tokens' in self.generate_kwargs and (self.generate_kwargs['max_new_tokens'] != self.generate_kwargs['min_new_tokens']): + raise ValueError('Setting `min_new_tokens` and `max_new_tokens` to different values results in non-deterministic behavior.') + elif 'max_new_tokens' in self.generate_kwargs and 'min_new_tokens' not in self.generate_kwargs: + LOGGER.warning('Setting `max_new_tokens` without `min_new_tokens` results in non-deterministic behavior. 
Setting `min_new_tokens` to `max_new_tokens`.') + self.generate_kwargs['min_new_tokens'] = self.generate_kwargs['max_new_tokens'] + elif 'min_new_tokens' in self.generate_kwargs and 'max_new_tokens' not in self.generate_kwargs: + LOGGER.warning('Setting `min_new_tokens` without `max_new_tokens` results in non-deterministic behavior. Setting `max_new_tokens` to `min_new_tokens`.') + self.generate_kwargs['max_new_tokens'] = self.generate_kwargs['min_new_tokens'] + if self.energy and is_rocm_system(): + raise ValueError('Energy measurement through codecarbon is not yet available on ROCm-powered devices.') + +# File: optimum-benchmark-main/optimum_benchmark/scenarios/energy_star/preprocessing_utils.py +from datasets import Dataset +from transformers import PreTrainedTokenizer +from ...backends.transformers_utils import PretrainedProcessor +from .config import EnergyStarConfig + +def preprocess(dataset: Dataset, task: str, config: EnergyStarConfig, preprocessor: PretrainedProcessor) -> Dataset: + task_to_preprocessing = {'feature-extraction': feature_extraction_preprocessing} + return task_to_preprocessing[task](dataset, config, preprocessor) + +def feature_extraction_preprocessing(dataset: Dataset, config: EnergyStarConfig, tokenizer: PreTrainedTokenizer) -> Dataset: + if config.input_shapes['batch_size'] == 1: + dataset = dataset.filter(lambda example: example[config.text_column_name] != '') + if config.num_samples != -1: + dataset = dataset.select(range(config.num_samples)) + if getattr(tokenizer, 'pad_token', None) is None: + tokenizer.pad_token = tokenizer.eos_token + padding = False if config.input_shapes['batch_size'] == 1 else True + + def tokenize_function(examples): + return tokenizer(examples[config.text_column_name], padding=padding, truncation=config.truncation, max_length=config.max_length if config.max_length != -1 else None) + dataset = dataset.map(tokenize_function, batched=True, remove_columns=dataset.features, desc='Running tokenizer on dataset').with_format('torch') + return dataset + +# File: optimum-benchmark-main/optimum_benchmark/scenarios/energy_star/scenario.py +from dataclasses import dataclass +from logging import getLogger +import torch +from datasets import load_dataset +from torch.utils.data import DataLoader +from tqdm import tqdm +from ...backends.base import Backend, BackendConfigT +from ...benchmark.report import BenchmarkMeasurements, BenchmarkReport +from ...import_utils import is_torch_distributed_available +from ...task_utils import IMAGE_DIFFUSION_TASKS, TEXT_GENERATION_TASKS +from ...trackers.energy import Efficiency, EnergyTracker +from ..base import Scenario +from .config import EnergyStarConfig +from .preprocessing_utils import preprocess +if is_torch_distributed_available(): + import torch.distributed +LOGGER = getLogger('energy_star') +PER_TOKEN_BACKENDS = ['pytorch', 'onnxruntime', 'openvino', 'neural-compressor'] +TEXT_GENERATION_DEFAULT_KWARGS = {'num_return_sequences': 1, 'max_new_tokens': 100, 'min_new_tokens': 100, 'temperature': 1.0, 'do_sample': False, 'use_cache': True, 'pad_token_id': 0, 'num_beams': 1} +TEXT_GENERATION_PREFILL_OVERRIDES = {'max_new_tokens': 1, 'min_new_tokens': 1} +TEXT_GENERATION_WARMUP_OVERRIDES = {'max_new_tokens': 2, 'min_new_tokens': 2} +IMAGE_DIFFUSION_DEFAULT_KWARGS = {'num_inference_steps': 30, 'num_images_per_prompt': 1} +IMAGE_DIFFUSION_WARMUP_OVERRIDES = {'num_inference_steps': 2} +TEXT_GENERATION_THROUGHPUT_UNIT = 'tokens/s' +IMAGE_DIFFUSION_THROUGHPUT_UNIT = 'images/s' +INFERENCE_THROUGHPUT_UNIT = 'samples/s' 
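+# NOTE (descriptive comment, added): the *_THROUGHPUT_UNIT constants above express work per second, while
+# the *_EFFICIENCY_UNIT constants below express work per kWh; in this scenario the efficiency units are the
+# ones passed to Efficiency.from_energy(...) when filling in the report's efficiency fields.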
+TEXT_GENERATION_EFFICIENCY_UNIT = 'tokens/kWh' +IMAGE_DIFFUSION_EFFICIENCY_UNIT = 'images/kWh' +INFERENCE_EFFICIENCY_UNIT = 'samples/kWh' + +@dataclass +class TextGenerationReport(BenchmarkReport): + preprocess: BenchmarkMeasurements + per_token: BenchmarkMeasurements + prefill: BenchmarkMeasurements + decode: BenchmarkMeasurements + +@dataclass +class ImageDiffusionReport(BenchmarkReport): + preprocess: BenchmarkMeasurements + call: BenchmarkMeasurements + +@dataclass +class InferenceReport(BenchmarkReport): + preprocess: BenchmarkMeasurements + forward: BenchmarkMeasurements + +class EnergyStarScenario(Scenario[EnergyStarConfig]): + NAME = 'energy_star' + + def __init__(self, config: EnergyStarConfig) -> None: + super().__init__(config) + + def run(self, backend: Backend[BackendConfigT]) -> BenchmarkReport: + if is_torch_distributed_available() and torch.distributed.is_initialized(): + LOGGER.info('\t+ Distributing batch size across processes') + if self.config.input_shapes['batch_size'] % torch.distributed.get_world_size() != 0: + raise ValueError('The batch size must be divisible by the number of processes in a distributed environment') + self.config.input_shapes['batch_size'] //= torch.distributed.get_world_size() + self.energy_tracker = EnergyTracker(device=backend.config.device, device_ids=backend.config.device_ids) + LOGGER.info('\t+ Loading dataset') + raw_dataset = load_dataset(self.config.dataset_name, self.config.dataset_config, split=self.config.dataset_split) + LOGGER.info('\t+ Preprocessing dataset') + with self.energy_tracker.track(): + self.dataset = preprocess(dataset=raw_dataset, task=backend.config.task, config=self.config, preprocessor=backend.pretrained_processor) + self.preprocessing_energy = self.energy_tracker.get_energy() + self.energy_tracker.reset() + LOGGER.info('\t+ Initialising dataloader') + self.dataloader = DataLoader(self.dataset, batch_size=self.config.input_shapes['batch_size']) + if backend.config.task in TEXT_GENERATION_TASKS: + LOGGER.info('\t+ Updating Text Generation kwargs with default values') + self.config.generate_kwargs = {**TEXT_GENERATION_WARMUP_OVERRIDES, **self.config.generate_kwargs} + LOGGER.info('\t+ Initializing Text Generation report') + self.report = TextGenerationReport(preprocess=BenchmarkMeasurements(), per_token=BenchmarkMeasurements(), prefill=BenchmarkMeasurements(), decode=BenchmarkMeasurements()) + elif backend.config.task in IMAGE_DIFFUSION_TASKS: + LOGGER.info('\t+ Updating Image Diffusion kwargs with default values') + self.config.call_kwargs = {**IMAGE_DIFFUSION_WARMUP_OVERRIDES, **self.config.call_kwargs} + LOGGER.info('\t+ Initializing Image Diffusion report') + self.report = ImageDiffusionReport(preprocess=BenchmarkMeasurements(), call=BenchmarkMeasurements()) + else: + LOGGER.info('\t+ Initializing Inference report') + self.report = InferenceReport(preprocess=BenchmarkMeasurements(), forward=BenchmarkMeasurements()) + self.report.preprocess.energy = self.preprocessing_energy + self.report.preprocess.efficiency = Efficiency.from_energy(self.report.preprocess.energy, self.inference_volume, unit=INFERENCE_EFFICIENCY_UNIT) + LOGGER.info('\t+ Preparing backend for Inference') + backend.prepare_for_inference(input_shapes=self.config.input_shapes, inference_kwargs={**self.config.generate_kwargs, **self.config.forward_kwargs, **self.config.call_kwargs}) + LOGGER.info('\t+ Warming up backend for Inference') + warmup_inputs = backend.prepare_inputs(next(iter(self.dataloader))) + for _ in range(self.config.warmup_runs): + if 
backend.config.task in TEXT_GENERATION_TASKS: + _ = backend.generate(warmup_inputs, {'max_new_tokens': 2, 'min_new_tokens': 2}) + elif backend.config.task in IMAGE_DIFFUSION_TASKS: + _ = backend.call(warmup_inputs, {'num_inference_steps': 2}) + else: + _ = backend.forward(warmup_inputs, self.config.forward_kwargs) + if backend.config.task in TEXT_GENERATION_TASKS: + LOGGER.info('\t+ Additional warmup for Text Generation') + _ = backend.generate(warmup_inputs, self.config.generate_kwargs) + elif backend.config.task in IMAGE_DIFFUSION_TASKS: + LOGGER.info('\t+ Additional warmup for Image Diffusion') + _ = backend.call(warmup_inputs, self.config.call_kwargs) + if self.config.energy: + if backend.config.task in TEXT_GENERATION_TASKS: + self.run_text_generation_energy_tracking(backend) + elif backend.config.task in IMAGE_DIFFUSION_TASKS: + self.run_image_diffusion_energy_tracking(backend) + else: + self.run_inference_energy_tracking(backend) + self.report.log_energy() + self.report.log_efficiency() + return self.report + + def run_text_generation_energy_tracking(self, backend: Backend[BackendConfigT]): + LOGGER.info('\t+ Running energy tracking') + with self.energy_tracker.track(): + for inputs in tqdm(self.dataloader): + inputs = backend.prepare_inputs(inputs) + _ = backend.forward(inputs, self.config.forward_kwargs) + self.report.prefill.energy = self.energy_tracker.get_energy() + self.report.prefill.efficiency = Efficiency.from_energy(self.report.prefill.energy, self.text_generation_prefill_volume, unit=TEXT_GENERATION_EFFICIENCY_UNIT) + self.energy_tracker.reset() + with self.energy_tracker.track(): + for inputs in tqdm(self.dataloader): + _ = backend.generate(inputs, self.config.generate_kwargs) + self.report.decode.energy = self.energy_tracker.get_energy() + self.report.decode.efficiency = Efficiency.from_energy(self.report.decode.energy, self.text_generation_decode_volume, unit=TEXT_GENERATION_EFFICIENCY_UNIT) + self.energy_tracker.reset() + + def run_image_diffusion_energy_tracking(self, backend: Backend[BackendConfigT]): + LOGGER.info('\t+ Running energy tracking') + with self.energy_tracker.track(): + for inputs in tqdm(self.dataloader): + inputs = backend.prepare_inputs(inputs) + _ = backend.call(self.call_inputs, self.config.call_kwargs) + self.report.call.energy = self.energy_tracker.get_energy() + self.report.call.efficiency = Efficiency.from_energy(self.report.call.energy, self.image_diffusion_volume, unit=IMAGE_DIFFUSION_EFFICIENCY_UNIT) + self.energy_tracker.reset() + + def run_inference_energy_tracking(self, backend: Backend[BackendConfigT]): + LOGGER.info('\t+ Running energy tracking') + with self.energy_tracker.track(): + for inputs in tqdm(self.dataloader): + inputs = backend.prepare_inputs(inputs) + _ = backend.forward(inputs, self.config.forward_kwargs) + self.report.forward.energy = self.energy_tracker.get_energy() + self.report.forward.efficiency = Efficiency.from_energy(self.report.forward.energy, self.inference_volume, unit=INFERENCE_EFFICIENCY_UNIT) + self.energy_tracker.reset() + + @property + def inference_volume(self) -> int: + return self.config.num_samples + + @property + def image_diffusion_volume(self) -> int: + return self.config.input_shapes['batch_size'] * self.config.call_kwargs['num_images_per_prompt'] + + @property + def text_generation_prefill_volume(self) -> int: + return self.config.input_shapes['batch_size'] * self.config.input_shapes['sequence_length'] + + @property + def text_generation_per_token_volume(self) -> int: + return 
self.config.input_shapes['batch_size'] * self.config.generate_kwargs['num_return_sequences'] + + @property + def text_generation_decode_volume(self) -> int: + return self.config.input_shapes['batch_size'] * self.config.generate_kwargs['num_return_sequences'] * (self.config.generate_kwargs['max_new_tokens'] - 1) + +# File: optimum-benchmark-main/optimum_benchmark/scenarios/inference/config.py +from dataclasses import dataclass, field +from logging import getLogger +from typing import Any, Dict, Optional +from ...system_utils import is_rocm_system +from ..config import ScenarioConfig +LOGGER = getLogger('inference') +INPUT_SHAPES = {'batch_size': 2, 'num_choices': 2, 'sequence_length': 16} + +@dataclass +class InferenceConfig(ScenarioConfig): + name: str = 'inference' + _target_: str = 'optimum_benchmark.scenarios.inference.scenario.InferenceScenario' + iterations: int = field(default=10, metadata={'help': 'Minimum number of iterations to run the benchmark. The number of tracked inferences will be at least this value.Set to 0 to disable this constraint (benchmark will run for `duration` seconds).'}) + duration: int = field(default=10, metadata={'help': 'Minimum duration of the benchmark in seconds. The sum of tracked inferences will be at least this value.Set to 0 to disable this constraint (benchmark will run for `iterations` iterations).'}) + warmup_runs: int = field(default=10, metadata={'help': 'Number of warmup runs to perform before benchmarking.'}) + input_shapes: Dict[str, Any] = field(default_factory=dict, metadata={'help': 'Input shapes for the model. Missing keys will be filled with default values.'}) + new_tokens: Optional[int] = field(default=None, metadata={'help': 'If set, `max_new_tokens` and `min_new_tokens` will be set to this value.'}) + memory: bool = field(default=False, metadata={'help': 'Measure max memory usage'}) + latency: bool = field(default=True, metadata={'help': 'Measure latencies and throughputs'}) + energy: bool = field(default=False, metadata={'help': 'Measure energy usage and efficiency'}) + forward_kwargs: Dict[str, Any] = field(default_factory=dict, metadata={'help': 'Keyword arguments to pass to the forward method of the backend.'}) + generate_kwargs: Dict[str, Any] = field(default_factory=dict, metadata={'help': 'Keyword arguments to pass to the generate method of the backend.'}) + call_kwargs: Dict[str, Any] = field(default_factory=dict, metadata={'help': 'Keyword arguments to pass to the call method of the backend.'}) + + def __post_init__(self): + super().__post_init__() + self.input_shapes = {**INPUT_SHAPES, **self.input_shapes} + if self.new_tokens is not None: + LOGGER.warning('`new_tokens` is deprecated. Use `max_new_tokens` and `min_new_tokens` instead. Setting `max_new_tokens` and `min_new_tokens` to `new_tokens`.') + self.generate_kwargs['max_new_tokens'] = self.new_tokens + self.generate_kwargs['min_new_tokens'] = self.new_tokens + if 'max_new_tokens' in self.generate_kwargs and 'min_new_tokens' in self.generate_kwargs and (self.generate_kwargs['max_new_tokens'] != self.generate_kwargs['min_new_tokens']): + raise ValueError('Setting `min_new_tokens` and `max_new_tokens` to different values results in non-deterministic behavior.') + elif 'max_new_tokens' in self.generate_kwargs and 'min_new_tokens' not in self.generate_kwargs: + LOGGER.warning('Setting `max_new_tokens` without `min_new_tokens` results in non-deterministic behavior. 
Setting `min_new_tokens` to `max_new_tokens`.') + self.generate_kwargs['min_new_tokens'] = self.generate_kwargs['max_new_tokens'] + elif 'min_new_tokens' in self.generate_kwargs and 'max_new_tokens' not in self.generate_kwargs: + LOGGER.warning('Setting `min_new_tokens` without `max_new_tokens` results in non-deterministic behavior. Setting `max_new_tokens` to `min_new_tokens`.') + self.generate_kwargs['max_new_tokens'] = self.generate_kwargs['min_new_tokens'] + if self.energy and is_rocm_system(): + raise ValueError('Energy measurement through codecarbon is not yet available on ROCm-powered devices.') + +# File: optimum-benchmark-main/optimum_benchmark/scenarios/inference/scenario.py +import time +from contextlib import ExitStack +from transformers import LogitsProcessorList +from ...backends.base import Backend, BackendConfigT +from ...benchmark.report import BenchmarkReport +from ...generators.input_generator import InputGenerator +from ...task_utils import IMAGE_DIFFUSION_TASKS, TEXT_GENERATION_TASKS +from ...trackers.energy import Efficiency, EnergyTracker +from ...trackers.latency import LatencyTracker, PerTokenLatencyLogitsProcessor, Throughput +from ...trackers.memory import MemoryTracker +from ..base import Scenario +from .config import InferenceConfig +PER_TOKEN_BACKENDS = ['pytorch', 'onnxruntime', 'openvino', 'neural-compressor'] +TEXT_GENERATION_DEFAULT_KWARGS = {'num_return_sequences': 1, 'max_new_tokens': 100, 'min_new_tokens': 100, 'do_sample': False, 'use_cache': True, 'pad_token_id': 0, 'eos_token_id': 0, 'num_beams': 1} +TEXT_GENERATION_PREFILL_OVERRIDES = {'max_new_tokens': 1, 'min_new_tokens': 1} +TEXT_GENERATION_WARMUP_OVERRIDES = {'max_new_tokens': 2, 'min_new_tokens': 2} +IMAGE_DIFFUSION_DEFAULT_KWARGS = {'num_inference_steps': 30, 'num_images_per_prompt': 1} +IMAGE_DIFFUSION_WARMUP_OVERRIDES = {'num_inference_steps': 2} +TEXT_GENERATION_THROUGHPUT_UNIT = 'tokens/s' +IMAGE_DIFFUSION_THROUGHPUT_UNIT = 'images/s' +INFERENCE_THROUGHPUT_UNIT = 'samples/s' +TEXT_GENERATION_EFFICIENCY_UNIT = 'tokens/kWh' +IMAGE_DIFFUSION_EFFICIENCY_UNIT = 'images/kWh' +INFERENCE_EFFICIENCY_UNIT = 'samples/kWh' + +class InferenceScenario(Scenario[InferenceConfig]): + NAME = 'inference' + + def __init__(self, config: InferenceConfig) -> None: + super().__init__(config) + + def run(self, backend: Backend[BackendConfigT]) -> BenchmarkReport: + self.logger.info('\t+ Creating input generator') + self.input_generator = InputGenerator(task=backend.config.task, model_shapes=backend.model_shapes, input_shapes=self.config.input_shapes) + if backend.config.task in TEXT_GENERATION_TASKS: + self.logger.info('\t+ Generating Text Generation inputs') + self.inputs = self.input_generator() + self.logger.info('\t+ Updating Text Generation kwargs with default values') + self.config.generate_kwargs = {**TEXT_GENERATION_DEFAULT_KWARGS, **self.config.generate_kwargs} + self.logger.info('\t+ Initializing Text Generation report') + self.report = BenchmarkReport.from_list(targets=['load', 'prefill', 'decode', 'per_token']) + elif backend.config.task in IMAGE_DIFFUSION_TASKS: + self.logger.info('\t+ Generating Image Diffusion inputs') + self.inputs = self.input_generator() + self.logger.info('\t+ Updating Image Diffusion kwargs with default values') + self.config.call_kwargs = {**IMAGE_DIFFUSION_DEFAULT_KWARGS, **self.config.call_kwargs} + self.logger.info('\t+ Initializing Image Diffusion report') + self.report = BenchmarkReport.from_list(targets=['load', 'call']) + else: + self.logger.info('\t+ Generating 
Inference inputs') + self.inputs = self.input_generator() + self.logger.info('\t+ Initializing Inference report') + self.report = BenchmarkReport.from_list(targets=['load', 'forward']) + self.logger.info('\t+ Preparing input shapes for Inference') + self.config.input_shapes = backend.prepare_input_shapes(input_shapes=self.config.input_shapes) + self.run_model_loading_tracking(backend) + self.logger.info('\t+ Preparing inputs for Inference') + self.inputs = backend.prepare_inputs(inputs=self.inputs) + if self.config.memory: + if backend.config.task in TEXT_GENERATION_TASKS: + self.run_text_generation_memory_tracking(backend) + elif backend.config.task in IMAGE_DIFFUSION_TASKS: + self.run_image_diffusion_memory_tracking(backend) + else: + self.run_inference_memory_tracking(backend) + self.report.log_memory() + if self.config.latency or self.config.energy: + if backend.config.task in TEXT_GENERATION_TASKS: + self.warmup_text_generation(backend) + elif backend.config.task in IMAGE_DIFFUSION_TASKS: + self.warmup_image_diffusion(backend) + else: + self.warmup_inference(backend) + if self.config.latency: + if backend.config.task in TEXT_GENERATION_TASKS: + if backend.config.name in PER_TOKEN_BACKENDS: + self.run_per_token_text_generation_latency_tracking(backend) + else: + self.run_text_generation_latency_tracking(backend) + elif backend.config.task in IMAGE_DIFFUSION_TASKS: + self.run_image_diffusion_latency_tracking(backend) + else: + self.run_latency_inference_tracking(backend) + self.report.log_latency() + self.report.log_throughput() + if self.config.energy: + if backend.config.task in TEXT_GENERATION_TASKS: + self.run_text_generation_energy_tracking(backend) + elif backend.config.task in IMAGE_DIFFUSION_TASKS: + self.run_image_diffusion_energy_tracking(backend) + else: + self.run_inference_energy_tracking(backend) + self.report.log_energy() + self.report.log_efficiency() + return self.report + + def warmup_text_generation(self, backend: Backend[BackendConfigT]): + self.logger.info('\t+ Warming up backend for Text Generation') + _ = backend.generate(self.inputs, self.config.generate_kwargs) + for _ in range(self.config.warmup_runs): + _ = backend.generate(self.inputs, {**self.config.generate_kwargs, **TEXT_GENERATION_WARMUP_OVERRIDES}) + + def warmup_image_diffusion(self, backend: Backend[BackendConfigT]): + self.logger.info('\t+ Warming up backend for Image Diffusion') + _ = backend.call(self.inputs, self.config.call_kwargs) + for _ in range(self.config.warmup_runs): + _ = backend.call(self.inputs, {**self.config.call_kwargs, **IMAGE_DIFFUSION_WARMUP_OVERRIDES}) + + def warmup_inference(self, backend: Backend[BackendConfigT]): + self.logger.info('\t+ Warming up backend for Inference') + for _ in range(self.config.warmup_runs): + _ = backend.forward(self.inputs, self.config.forward_kwargs) + + def run_model_loading_tracking(self, backend: Backend[BackendConfigT]): + self.logger.info('\t+ Running model loading tracking') + if self.config.latency: + latency_tracker = LatencyTracker(backend=backend.config.name, device=backend.config.device) + if self.config.memory: + memory_tracker = MemoryTracker(backend=backend.config.name, device=backend.config.device, device_ids=backend.config.device_ids) + if self.config.energy: + energy_tracker = EnergyTracker(backend=backend.config.name, device=backend.config.device, device_ids=backend.config.device_ids) + context_stack = ExitStack() + if self.config.latency: + context_stack.enter_context(latency_tracker.track()) + if self.config.memory: + 
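+ # NOTE (descriptive comment, added): each enabled tracker's context manager is pushed onto the same
+ # ExitStack so that latency, memory and energy are all measured around the single backend.load() call below.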
context_stack.enter_context(memory_tracker.track()) + if self.config.energy: + context_stack.enter_context(energy_tracker.track()) + with context_stack: + self.logger.info('\t+ Loading model for Inference') + backend.load() + if self.config.latency: + self.report.load.latency = latency_tracker.get_latency() + if self.config.memory: + self.report.load.memory = memory_tracker.get_max_memory() + if self.config.energy: + self.report.load.energy = energy_tracker.get_energy() + + def run_text_generation_memory_tracking(self, backend: Backend[BackendConfigT]): + self.logger.info('\t+ Running Text Generation memory tracking') + self.memory_tracker = MemoryTracker(backend=backend.config.name, device=backend.config.device, device_ids=backend.config.device_ids) + prefill_kwargs = {**self.config.generate_kwargs, **TEXT_GENERATION_PREFILL_OVERRIDES} + with self.memory_tracker.track(): + _ = backend.prefill(self.inputs, prefill_kwargs) + self.report.prefill.memory = self.memory_tracker.get_max_memory() + with self.memory_tracker.track(): + _ = backend.generate(self.inputs, self.config.generate_kwargs) + self.report.decode.memory = self.memory_tracker.get_max_memory() + + def run_image_diffusion_memory_tracking(self, backend: Backend[BackendConfigT]): + self.logger.info('\t+ Running Image Diffusion memory tracking') + self.memory_tracker = MemoryTracker(backend=backend.config.name, device=backend.config.device, device_ids=backend.config.device_ids) + with self.memory_tracker.track(): + _ = backend.call(self.inputs, self.config.call_kwargs) + self.report.call.memory = self.memory_tracker.get_max_memory() + + def run_inference_memory_tracking(self, backend: Backend[BackendConfigT]): + self.logger.info('\t+ Running Inference memory tracking') + self.memory_tracker = MemoryTracker(backend=backend.config.name, device=backend.config.device, device_ids=backend.config.device_ids) + with self.memory_tracker.track(): + _ = backend.forward(self.inputs, self.config.forward_kwargs) + self.report.forward.memory = self.memory_tracker.get_max_memory() + + def run_per_token_text_generation_latency_tracking(self, backend: Backend[BackendConfigT]): + self.logger.info('\t+ Running Per-Token Text Generation latency tracking') + latency_tracker = PerTokenLatencyLogitsProcessor(device=backend.config.device, backend=backend.config.name) + per_token_kwargs = {**self.config.generate_kwargs, 'logits_processor': LogitsProcessorList([latency_tracker])} + while latency_tracker.elapsed() < self.config.duration or latency_tracker.count() < self.config.iterations: + with latency_tracker.track(): + _ = backend.generate(self.inputs, per_token_kwargs) + per_token_latency = latency_tracker.get_per_token_latency() + prefill_latency = latency_tracker.get_prefill_latency() + decode_latency = latency_tracker.get_decode_latency() + per_token_volume = self.atomic_per_token_volume + prefill_volume = self.atomic_prefill_volume + decode_volume = self.atomic_decode_volume + self.report.per_token.latency = per_token_latency + self.report.prefill.latency = prefill_latency + self.report.decode.latency = decode_latency + self.report.per_token.throughput = Throughput.from_latency(per_token_latency, per_token_volume, unit=TEXT_GENERATION_THROUGHPUT_UNIT) + self.report.prefill.throughput = Throughput.from_latency(prefill_latency, prefill_volume, unit=TEXT_GENERATION_THROUGHPUT_UNIT) + self.report.decode.throughput = Throughput.from_latency(decode_latency, decode_volume, unit=TEXT_GENERATION_THROUGHPUT_UNIT) + + def run_text_generation_latency_tracking(self, 
backend: Backend[BackendConfigT]): + self.logger.info('\t+ Running Text Generation latency tracking') + latency_tracker = LatencyTracker(backend=backend.config.name, device=backend.config.device) + prefill_kwargs = {**self.config.generate_kwargs, **TEXT_GENERATION_PREFILL_OVERRIDES} + while latency_tracker.elapsed() < self.config.duration or latency_tracker.count() < self.config.iterations: + with latency_tracker.track(): + _ = backend.prefill(self.inputs, prefill_kwargs) + prefill_latency = latency_tracker.get_latency() + prefill_volume = self.atomic_prefill_volume + self.report.prefill.latency = prefill_latency + self.report.prefill.throughput = Throughput.from_latency(prefill_latency, prefill_volume, unit=TEXT_GENERATION_THROUGHPUT_UNIT) + latency_tracker.reset() + while latency_tracker.elapsed() < self.config.duration or latency_tracker.count() < self.config.iterations: + with latency_tracker.track(): + _ = backend.generate(self.inputs, self.config.generate_kwargs) + generate_latency = latency_tracker.get_latency() + decode_latency = generate_latency - prefill_latency + decode_volume = self.atomic_decode_volume + self.report.decode.latency = decode_latency + self.report.decode.throughput = Throughput.from_latency(decode_latency, decode_volume, unit=TEXT_GENERATION_THROUGHPUT_UNIT) + + def run_image_diffusion_latency_tracking(self, backend: Backend[BackendConfigT]): + self.logger.info('\t+ Running Image Diffusion latency tracking') + latency_tracker = LatencyTracker(backend=backend.config.name, device=backend.config.device) + while latency_tracker.elapsed() < self.config.duration or latency_tracker.count() < self.config.iterations: + with latency_tracker.track(): + _ = backend.call(self.inputs, self.config.call_kwargs) + call_latency = latency_tracker.get_latency() + call_volume = self.atomic_call_volume + self.report.call.latency = call_latency + self.report.call.throughput = Throughput.from_latency(call_latency, call_volume, unit=IMAGE_DIFFUSION_THROUGHPUT_UNIT) + + def run_latency_inference_tracking(self, backend: Backend[BackendConfigT]): + self.logger.info('\t+ Running Inference latency tracking') + latency_tracker = LatencyTracker(backend=backend.config.name, device=backend.config.device) + while latency_tracker.elapsed() < self.config.duration or latency_tracker.count() < self.config.iterations: + with latency_tracker.track(): + _ = backend.forward(self.inputs, self.config.forward_kwargs) + forward_latency = latency_tracker.get_latency() + forward_volume = self.atomic_forward_volume + self.report.forward.latency = forward_latency + self.report.forward.throughput = Throughput.from_latency(forward_latency, forward_volume, unit=INFERENCE_THROUGHPUT_UNIT) + + def run_text_generation_energy_tracking(self, backend: Backend[BackendConfigT]): + self.logger.info('\t+ Running Text Generation energy tracking') + energy_tracker = EnergyTracker(backend=backend.config.name, device=backend.config.device, device_ids=backend.config.device_ids) + prefill_kwargs = {**self.config.generate_kwargs, **TEXT_GENERATION_PREFILL_OVERRIDES} + count = 0 + elapsed = 0 + start_time = time.perf_counter() + with energy_tracker.track(file_prefix='prefill'): + while elapsed < self.config.duration or count < self.config.iterations: + _ = backend.prefill(self.inputs, prefill_kwargs) + elapsed = time.perf_counter() - start_time + count += 1 + prefill_energy = energy_tracker.get_energy() / count + prefill_volume = self.atomic_prefill_volume + self.report.prefill.energy = prefill_energy + self.report.prefill.efficiency 
= Efficiency.from_energy(prefill_energy, prefill_volume, unit=TEXT_GENERATION_EFFICIENCY_UNIT) + count = 0 + elapsed = 0 + start_time = time.perf_counter() + with energy_tracker.track(file_prefix='generate'): + while elapsed < self.config.duration or count < self.config.iterations: + _ = backend.generate(self.inputs, self.config.generate_kwargs) + elapsed = time.perf_counter() - start_time + count += 1 + generate_energy = energy_tracker.get_energy() / count + decode_energy = generate_energy - prefill_energy + decode_volume = self.atomic_decode_volume + self.report.decode.energy = decode_energy + self.report.decode.efficiency = Efficiency.from_energy(decode_energy, decode_volume, unit=TEXT_GENERATION_EFFICIENCY_UNIT) + + def run_image_diffusion_energy_tracking(self, backend: Backend[BackendConfigT]): + self.logger.info('\t+ Running Image Diffusion energy tracking') + energy_tracker = EnergyTracker(backend=backend.config.name, device=backend.config.device, device_ids=backend.config.device_ids) + count = 0 + elapsed = 0 + start_time = time.perf_counter() + with energy_tracker.track(file_prefix='call'): + while elapsed < self.config.duration or count < self.config.iterations: + _ = backend.call(self.inputs, self.config.call_kwargs) + elapsed = time.perf_counter() - start_time + count += 1 + call_energy = energy_tracker.get_energy() / count + call_volume = self.atomic_call_volume + self.report.call.energy = call_energy + self.report.call.efficiency = Efficiency.from_energy(call_energy, call_volume, unit=IMAGE_DIFFUSION_EFFICIENCY_UNIT) + + def run_inference_energy_tracking(self, backend: Backend[BackendConfigT]): + self.logger.info('\t+ Running energy tracking') + energy_tracker = EnergyTracker(backend=backend.config.name, device=backend.config.device, device_ids=backend.config.device_ids) + count = 0 + elapsed = 0 + start_time = time.perf_counter() + with energy_tracker.track(file_prefix='forward'): + while elapsed < self.config.duration or count < self.config.iterations: + _ = backend.forward(self.inputs, self.config.forward_kwargs) + elapsed = time.perf_counter() - start_time + count += 1 + forward_energy = energy_tracker.get_energy() / count + forward_volume = self.atomic_forward_volume + self.report.forward.energy = forward_energy + self.report.forward.efficiency = Efficiency.from_energy(forward_energy, forward_volume, unit=INFERENCE_EFFICIENCY_UNIT) + + @property + def atomic_forward_volume(self) -> int: + return self.config.input_shapes['batch_size'] + + @property + def atomic_call_volume(self) -> int: + if 'prompt' in self.inputs: + return self.config.input_shapes['batch_size'] * self.config.call_kwargs['num_images_per_prompt'] + else: + return self.config.input_shapes['batch_size'] + + @property + def atomic_prefill_volume(self) -> int: + if {'input_ids', 'prompt', 'prompts'} & set(self.inputs.keys()): + return self.config.input_shapes['batch_size'] * max(self.config.input_shapes['sequence_length'], 1) + else: + return self.config.input_shapes['batch_size'] + + @property + def atomic_per_token_volume(self) -> int: + return self.config.input_shapes['batch_size'] * self.config.generate_kwargs['num_beams'] + + @property + def atomic_decode_volume(self) -> int: + return self.config.input_shapes['batch_size'] * self.config.generate_kwargs['num_beams'] * (self.config.generate_kwargs['max_new_tokens'] - 1) + +# File: optimum-benchmark-main/optimum_benchmark/scenarios/training/config.py +from dataclasses import dataclass, field +from logging import getLogger +from typing import Any, Dict +from 
..config import ScenarioConfig +LOGGER = getLogger('training') +TRAINING_ARGUMENT = {'per_device_train_batch_size': 2, 'gradient_accumulation_steps': 1, 'output_dir': './trainer_output', 'evaluation_strategy': 'no', 'eval_strategy': 'no', 'save_strategy': 'no', 'do_train': True, 'use_cpu': False, 'max_steps': -1, 'do_eval': False, 'do_predict': False, 'report_to': 'none', 'skip_memory_metrics': True, 'ddp_find_unused_parameters': False} +DATASET_SHAPES = {'dataset_size': 500, 'sequence_length': 16, 'num_choices': 1} + +@dataclass +class TrainingConfig(ScenarioConfig): + name: str = 'training' + _target_: str = 'optimum_benchmark.scenarios.training.scenario.TrainingScenario' + max_steps: int = 140 + warmup_steps: int = 40 + dataset_shapes: Dict[str, Any] = field(default_factory=dict) + training_arguments: Dict[str, Any] = field(default_factory=dict) + latency: bool = field(default=True, metadata={'help': 'Measure latencies and throughputs'}) + memory: bool = field(default=False, metadata={'help': 'Measure max memory usage'}) + energy: bool = field(default=False, metadata={'help': 'Measure energy usage'}) + + def __post_init__(self): + super().__post_init__() + self.dataset_shapes = {**DATASET_SHAPES, **self.dataset_shapes} + self.training_arguments = {**TRAINING_ARGUMENT, **self.training_arguments} + if self.training_arguments['max_steps'] == -1: + self.training_arguments['max_steps'] = self.max_steps + if self.max_steps != self.training_arguments['max_steps']: + LOGGER.warning(f"`scenario.max_steps` ({self.max_steps}) and `scenario.training_arguments.max_steps` ({self.training_arguments['max_steps']}) are different. Using `scenario.training_arguments.max_steps`.") + self.max_steps = self.training_arguments['max_steps'] + if self.warmup_steps > self.max_steps: + raise ValueError(f'`scenario.warmup_steps` ({self.warmup_steps}) must be smaller than `scenario.max_steps` ({self.max_steps})') + +# File: optimum-benchmark-main/optimum_benchmark/scenarios/training/scenario.py +from contextlib import ExitStack +from transformers import default_data_collator +from ...backends.base import Backend, BackendConfigT +from ...benchmark.report import BenchmarkReport +from ...generators.dataset_generator import DatasetGenerator +from ...trackers.energy import Efficiency, EnergyTracker +from ...trackers.latency import StepLatencyTrainerCallback, Throughput +from ...trackers.memory import MemoryTracker +from ..base import Scenario +from .config import TrainingConfig +TRAIN_THROUGHPUT_UNIT = 'samples/s' +TRAIN_EFFICIENCY_UNIT = 'samples/kWh' + +class TrainingScenario(Scenario[TrainingConfig]): + NAME = 'training' + + def __init__(self, config: TrainingConfig) -> None: + super().__init__(config) + + def run(self, backend: Backend[BackendConfigT]) -> BenchmarkReport: + self.logger.info('\t+ Creating dataset generator') + dataset_generator = DatasetGenerator(task=backend.config.task, model_shapes=backend.model_shapes, dataset_shapes=self.config.dataset_shapes) + self.logger.info('\t+ Generating training dataset') + training_dataset = dataset_generator() + self.logger.info('\t+ Initializing training report') + self.report = BenchmarkReport.from_list(targets=['overall', 'warmup', 'train']) + self.logger.info('\t+ Loading model into backend') + backend.load() + training_callbackes = [] + if self.config.latency: + self.logger.info('\t+ Creating latency tracking callback') + latency_callback = StepLatencyTrainerCallback(device=backend.config.device, backend=backend.config.name) + self.logger.info('\t+ Adding latency 
measuring callback') + training_callbackes.append(latency_callback) + context_stack = ExitStack() + if self.config.memory: + self.logger.info('\t+ Creating memory tracking context manager') + memory_tracker = MemoryTracker(device=backend.config.device, backend=backend.config.name, device_ids=backend.config.device_ids) + if self.config.energy: + self.logger.info('\t+ Creating energy tracking context manager') + energy_tracker = EnergyTracker(device=backend.config.device, device_ids=backend.config.device_ids) + if self.config.memory: + self.logger.info('\t+ Entering memory tracking context manager') + context_stack.enter_context(memory_tracker.track()) + if self.config.energy: + self.logger.info('\t+ Entering energy tracking context manager') + context_stack.enter_context(energy_tracker.track()) + with context_stack: + backend.train(training_dataset=training_dataset, training_callbacks=training_callbackes, training_data_collator=default_data_collator, training_arguments=self.config.training_arguments) + if self.config.latency: + self.report.overall.latency = latency_callback.get_latency() + self.report.overall.throughput = Throughput.from_latency(self.report.overall.latency, volume=self.overall_volume, unit=TRAIN_THROUGHPUT_UNIT) + self.report.warmup.latency = self.report.overall.latency[:self.config.warmup_steps] + self.report.warmup.throughput = Throughput.from_latency(self.report.warmup.latency, volume=self.warmup_volume, unit=TRAIN_THROUGHPUT_UNIT) + self.report.train.latency = self.report.overall.latency[self.config.warmup_steps:] + self.report.train.throughput = Throughput.from_latency(self.report.train.latency, volume=self.train_volume, unit=TRAIN_THROUGHPUT_UNIT) + if self.config.memory: + self.report.overall.memory = memory_tracker.get_max_memory() + self.report.warmup.memory = memory_tracker.get_max_memory() + self.report.train.memory = memory_tracker.get_max_memory() + if self.config.energy: + self.report.overall.energy = energy_tracker.get_energy() + self.report.overall.efficiency = Efficiency.from_energy(self.report.overall.energy, volume=self.overall_volume, unit=TRAIN_EFFICIENCY_UNIT) + return self.report + + @property + def overall_volume(self) -> int: + return self.config.max_steps * self.config.training_arguments['per_device_train_batch_size'] * self.config.training_arguments['gradient_accumulation_steps'] + + @property + def warmup_volume(self) -> int: + return self.config.warmup_steps * self.config.training_arguments['per_device_train_batch_size'] * self.config.training_arguments['gradient_accumulation_steps'] + + @property + def train_volume(self) -> int: + return self.overall_volume - self.warmup_volume + +# File: optimum-benchmark-main/optimum_benchmark/system_utils.py +import os +import platform +import re +import subprocess +from typing import List, Optional +import psutil +from .import_utils import is_amdsmi_available, is_pynvml_available, is_pyrsmi_available + +def get_socket_ifname() -> Optional[str]: + for interface in psutil.net_if_addrs(): + if interface.startswith('e'): + return interface + return None + +def get_cpu() -> Optional[str]: + if platform.system() == 'Windows': + return platform.processor() + elif platform.system() == 'Darwin': + command = 'sysctl -n machdep.cpu.brand_string' + return str(subprocess.check_output(command, shell=True).decode().strip()) + elif platform.system() == 'Linux': + command = 'cat /proc/cpuinfo' + all_info = subprocess.check_output(command, shell=True).decode().strip() + for line in all_info.split('\n'): + if 'model name' in
line: + return re.sub('.*model name.*:', '', line, 1) + return 'Could not find device name' + else: + raise ValueError(f"Unknown system '{platform.system()}'") + +def get_cpu_ram_mb(): + return psutil.virtual_memory().total / 1000000.0 +try: + subprocess.check_output('nvidia-smi') + _nvidia_system = True +except Exception: + _nvidia_system = False +try: + subprocess.check_output('rocm-smi') + _rocm_system = True +except Exception: + _rocm_system = False + +def is_nvidia_system(): + return _nvidia_system + +def is_rocm_system(): + return _rocm_system +if is_nvidia_system() and is_pynvml_available(): + import pynvml +if is_rocm_system() and is_amdsmi_available(): + import amdsmi +if is_rocm_system() and is_pyrsmi_available(): + from pyrsmi import rocml + +def get_rocm_version(): + for folder in os.listdir('/opt/'): + if 'rocm' in folder and 'rocm' != folder: + return folder.split('-')[-1] + raise ValueError('Could not find ROCm version.') + +def get_gpus(): + if is_nvidia_system(): + if not is_pynvml_available(): + raise ValueError('The library PyNVML is required to get available GPUs, but is not installed. Please install the official and NVIDIA maintained PyNVML library through `pip install nvidia-ml-py`.') + gpus = [] + pynvml.nvmlInit() + for i in range(pynvml.nvmlDeviceGetCount()): + handle = pynvml.nvmlDeviceGetHandleByIndex(i) + gpu = pynvml.nvmlDeviceGetName(handle) + gpu = gpu.decode('utf-8') if isinstance(gpu, bytes) else gpu + gpus.append(gpu) + pynvml.nvmlShutdown() + elif is_rocm_system(): + if not is_amdsmi_available() and (not is_pyrsmi_available()): + raise ValueError('Either the library AMD SMI or PyRSMI is required to get available GPUs, but neither is installed. Please install the official and AMD maintained AMD SMI library from https://github.com/ROCm/amdsmi or PyRSMI library from https://github.com/ROCm/pyrsmi.') + gpus = [] + if is_amdsmi_available(): + amdsmi.amdsmi_init() + for processor_handles in amdsmi.amdsmi_get_processor_handles(): + gpus.append(amdsmi.amdsmi_get_gpu_vendor_name(processor_handles)) + amdsmi.amdsmi_shut_down() + elif is_pyrsmi_available(): + rocml.smi_initialize() + for i in range(rocml.smi_get_device_count()): + gpus.append(rocml.smi_get_device_name(i)) + rocml.smi_shutdown() + else: + raise ValueError('No NVIDIA or ROCm GPUs found.') + return gpus + +def get_gpu_vram_mb() -> List[int]: + if is_nvidia_system(): + if not is_pynvml_available(): + raise ValueError('The library PyNVML is required to get GPU VRAM, but is not installed. Please install the official and NVIDIA maintained PyNVML library through `pip install nvidia-ml-py`.') + pynvml.nvmlInit() + vrams = [pynvml.nvmlDeviceGetMemoryInfo(pynvml.nvmlDeviceGetHandleByIndex(i)).total for i in range(pynvml.nvmlDeviceGetCount())] + pynvml.nvmlShutdown() + elif is_rocm_system(): + if not is_amdsmi_available() and (not is_pyrsmi_available()): + raise ValueError('Either the library AMD SMI or PyRSMI is required to get GPU VRAM, but neither is installed. 
Please install the official and AMD maintained AMD SMI library from https://github.com/ROCm/amdsmi or PyRSMI library from https://github.com/ROCm/pyrsmi.') + if is_amdsmi_available(): + amdsmi.amdsmi_init() + vrams = [amdsmi.amdsmi_get_gpu_memory_total(processor_handles, mem_type=amdsmi.AmdSmiMemoryType.VRAM) for processor_handles in amdsmi.amdsmi_get_processor_handles()] + amdsmi.amdsmi_shut_down() + elif is_pyrsmi_available(): + rocml.smi_initialize() + vrams = [rocml.smi_get_device_memory_total(i) for i in range(rocml.smi_get_device_count())] + rocml.smi_shutdown() + else: + raise ValueError('No NVIDIA or ROCm GPUs found.') + return sum(vrams) + +def get_gpu_device_ids() -> str: + if is_nvidia_system(): + if os.environ.get('NVIDIA_VISIBLE_DEVICES', None) is not None: + device_ids = os.environ['NVIDIA_VISIBLE_DEVICES'] + elif os.environ.get('CUDA_VISIBLE_DEVICES', None) is not None: + device_ids = os.environ['CUDA_VISIBLE_DEVICES'] + else: + if not is_pynvml_available(): + raise ValueError('The library PyNVML is required to get GPU device ids, but is not installed. Please install the official and NVIDIA maintained PyNVML library through `pip install nvidia-ml-py`.') + pynvml.nvmlInit() + device_ids = list(range(pynvml.nvmlDeviceGetCount())) + device_ids = ','.join((str(i) for i in device_ids)) + pynvml.nvmlShutdown() + elif is_rocm_system(): + if os.environ.get('ROCR_VISIBLE_DEVICES', None) is not None: + device_ids = os.environ['ROCR_VISIBLE_DEVICES'] + elif os.environ.get('HIP_VISIBLE_DEVICES', None) is not None: + device_ids = os.environ['HIP_VISIBLE_DEVICES'] + elif os.environ.get('CUDA_VISIBLE_DEVICES', None) is not None: + device_ids = os.environ['CUDA_VISIBLE_DEVICES'] + else: + if not is_amdsmi_available() and (not is_pyrsmi_available()): + raise ValueError('Either the library AMD SMI or PyRSMI is required to get GPU device ids, but neither is installed. 
Please install the official and AMD maintained AMD SMI library from https://github.com/ROCm/amdsmi or PyRSMI library from https://github.com/ROCm/pyrsmi.') + if is_pyrsmi_available(): + rocml.smi_initialize() + device_ids = list(range(rocml.smi_get_device_count())) + device_ids = ','.join((str(i) for i in device_ids)) + rocml.smi_shutdown() + elif is_amdsmi_available(): + amdsmi.amdsmi_init() + device_ids = list(range(len(amdsmi.amdsmi_get_processor_handles()))) + device_ids = ','.join((str(i) for i in device_ids)) + amdsmi.amdsmi_shut_down() + else: + raise ValueError("Couldn't infer GPU device ids.") + return device_ids + +def get_system_info() -> dict: + system_dict = {'cpu': get_cpu(), 'cpu_count': os.cpu_count(), 'cpu_ram_mb': get_cpu_ram_mb(), 'system': platform.system(), 'machine': platform.machine(), 'platform': platform.platform(), 'processor': platform.processor(), 'python_version': platform.python_version()} + if is_nvidia_system() or is_rocm_system(): + system_dict['gpu'] = get_gpus() + system_dict['gpu_count'] = len(get_gpus()) + system_dict['gpu_vram_mb'] = get_gpu_vram_mb() + return system_dict + +# File: optimum-benchmark-main/optimum_benchmark/task_utils.py +import importlib +import json +import os +from typing import Optional +import huggingface_hub +from .backends.diffusers_utils import TASKS_TO_MODEL_TYPES_TO_MODEL_CLASSES as DIFFUSERS_TASKS_TO_MODEL_TYPES_TO_MODEL_CLASSES +from .backends.diffusers_utils import get_diffusers_pretrained_config +from .backends.timm_utils import get_timm_pretrained_config +from .backends.transformers_utils import TASKS_TO_MODEL_LOADERS, get_transformers_pretrained_config +from .backends.transformers_utils import TASKS_TO_MODEL_TYPES_TO_MODEL_CLASSES as TRANSFORMERS_TASKS_TO_MODEL_TYPES_TO_MODEL_CLASSES +_SYNONYM_TASK_MAP = {'masked-lm': 'fill-mask', 'causal-lm': 'text-generation', 'default': 'feature-extraction', 'vision2seq-lm': 'image-to-text', 'text-to-speech': 'text-to-audio', 'seq2seq-lm': 'text2text-generation', 'translation': 'text2text-generation', 'summarization': 'text2text-generation', 'mask-generation': 'feature-extraction', 'audio-ctc': 'automatic-speech-recognition', 'sentence-similarity': 'feature-extraction', 'speech2seq-lm': 'automatic-speech-recognition', 'sequence-classification': 'text-classification', 'zero-shot-classification': 'text-classification'} +IMAGE_DIFFUSION_TASKS = ['inpainting', 'text-to-image', 'image-to-image'] +TEXT_GENERATION_TASKS = ['image-to-text', 'conversational', 'text-generation', 'text2text-generation', 'automatic-speech-recognition'] +TEXT_EMBEDDING_TASKS = ['feature-extraction'] + +def map_from_synonym(task: str) -> str: + if task in _SYNONYM_TASK_MAP: + task = _SYNONYM_TASK_MAP[task] + return task + +def infer_library_from_model_name_or_path(model_name_or_path: str, revision: Optional[str]=None, token: Optional[str]=None) -> str: + inferred_library_name = None + if huggingface_hub.repo_exists(model_name_or_path, token=token): + model_info = huggingface_hub.model_info(model_name_or_path, revision=revision, token=token) + inferred_library_name = getattr(model_info, 'library_name', None) + if inferred_library_name is None: + repo_files = huggingface_hub.list_repo_files(model_name_or_path, revision=revision, token=token) + if 'model_index.json' in repo_files: + inferred_library_name = 'diffusers' + if inferred_library_name is None: + raise RuntimeError(f'Could not infer library name from repo {model_name_or_path}.') + elif os.path.isdir(model_name_or_path): + local_files = 
os.listdir(model_name_or_path) + if 'model_index.json' in local_files: + inferred_library_name = 'diffusers' + elif 'config.json' in local_files: + config_dict = json.load(open(os.path.join(model_name_or_path, 'config.json'), 'r')) + if 'pretrained_cfg' in config_dict or 'architecture' in config_dict: + inferred_library_name = 'timm' + elif '_diffusers_version' in config_dict: + inferred_library_name = 'diffusers' + else: + inferred_library_name = 'transformers' + if inferred_library_name is None: + raise KeyError(f'Could not find the proper library name for directory {model_name_or_path}.') + else: + raise KeyError(f"Could not find the proper library name for {model_name_or_path} because it's neither a repo nor a directory.") + if inferred_library_name == 'sentence-transformers': + inferred_library_name = 'transformers' + return inferred_library_name + +def infer_task_from_model_name_or_path(model_name_or_path: str, library_name: Optional[str]=None, revision: Optional[str]=None, token: Optional[str]=None) -> str: + if library_name is None: + library_name = infer_library_from_model_name_or_path(model_name_or_path, revision=revision, token=token) + inferred_task_name = None + if library_name == 'timm': + inferred_task_name = 'image-classification' + elif library_name == 'sentence-transformers': + inferred_task_name = 'feature-extraction' + elif os.path.isdir(model_name_or_path): + if library_name == 'diffusers': + diffusers_config = get_diffusers_pretrained_config(model_name_or_path, revision=revision, token=token) + class_name = diffusers_config['_class_name'] + for (task_name, model_mapping) in DIFFUSERS_TASKS_TO_MODEL_TYPES_TO_MODEL_CLASSES.items(): + for (model_type, model_class_name) in model_mapping.items(): + if class_name == model_class_name: + inferred_task_name = task_name + break + if inferred_task_name is not None: + break + elif library_name == 'transformers': + transformers_config = get_transformers_pretrained_config(model_name_or_path, revision=revision, token=token) + auto_modeling_module = importlib.import_module('transformers.models.auto.modeling_auto') + model_type = transformers_config.model_type + for (task_name, model_loaders) in TRANSFORMERS_TASKS_TO_MODEL_TYPES_TO_MODEL_CLASSES.items(): + if isinstance(model_loaders, str): + model_loaders = (model_loaders,) + for model_loader in model_loaders: + model_loader_class = getattr(auto_modeling_module, model_loader) + model_mapping = model_loader_class._model_mapping._model_mapping + if model_type in model_mapping: + inferred_task_name = task_name + break + if inferred_task_name is not None: + break + elif huggingface_hub.repo_exists(model_name_or_path, token=token): + model_info = huggingface_hub.model_info(model_name_or_path, revision=revision, token=token) + if model_info.pipeline_tag is not None: + inferred_task_name = map_from_synonym(model_info.pipeline_tag) + elif inferred_task_name is None: + if model_info.transformers_info is not None and model_info.transformersInfo.pipeline_tag is not None: + inferred_task_name = map_from_synonym(model_info.transformersInfo.pipeline_tag) + else: + auto_model_class_name = model_info.transformers_info['auto_model'] + for (task_name, model_loaders) in TASKS_TO_MODEL_LOADERS.items(): + if isinstance(model_loaders, str): + model_loaders = (model_loaders,) + for model_loader in model_loaders: + if auto_model_class_name == model_loader: + inferred_task_name = task_name + break + if inferred_task_name is not None: + break + if inferred_task_name is None: + raise KeyError(f'Could not find 
the proper task name for {auto_model_class_name}.') + return inferred_task_name + +def infer_model_type_from_model_name_or_path(model_name_or_path: str, library_name: Optional[str]=None, revision: Optional[str]=None, token: Optional[str]=None, trust_remote_code: bool=False) -> str: + if library_name is None: + library_name = infer_library_from_model_name_or_path(model_name_or_path, revision=revision, token=token) + inferred_model_type = None + if library_name == 'llama_cpp': + inferred_model_type = 'llama_cpp' + elif library_name == 'timm': + timm_config = get_timm_pretrained_config(model_name_or_path) + inferred_model_type = timm_config.architecture + elif library_name == 'diffusers': + config = get_diffusers_pretrained_config(model_name_or_path, revision=revision, token=token) + class_name = config['_class_name'] + for (task_name, model_mapping) in DIFFUSERS_TASKS_TO_MODEL_TYPES_TO_MODEL_CLASSES.items(): + for (model_type, model_class_name) in model_mapping.items(): + if model_class_name == class_name: + inferred_model_type = model_type + break + if inferred_model_type is not None: + break + else: + transformers_config = get_transformers_pretrained_config(model_name_or_path, revision=revision, token=token, trust_remote_code=trust_remote_code) + inferred_model_type = transformers_config.model_type + if inferred_model_type is None: + raise KeyError(f'Could not find the proper model type for {model_name_or_path}.') + return inferred_model_type + +# File: optimum-benchmark-main/optimum_benchmark/trackers/__init__.py +from .energy import Efficiency, Energy, EnergyTracker +from .latency import Latency, LatencyTracker, PerTokenLatencyLogitsProcessor, StepLatencyTrainerCallback, Throughput +from .memory import Memory, MemoryTracker +__all__ = ['Energy', 'EnergyTracker', 'Latency', 'LatencyTracker', 'Memory', 'MemoryTracker', 'PerTokenLatencyLogitsProcessor', 'StepLatencyTrainerCallback', 'Throughput', 'Efficiency'] + +# File: optimum-benchmark-main/optimum_benchmark/trackers/energy.py +import os +from contextlib import contextmanager +from dataclasses import asdict, dataclass +from json import dump +from logging import getLogger +from typing import List, Literal, Optional +from ..import_utils import is_codecarbon_available, is_torch_available, is_torch_distributed_available +from ..system_utils import get_gpu_device_ids +if is_torch_available(): + import torch +if is_torch_distributed_available(): + import torch.distributed +if is_codecarbon_available(): + from codecarbon import EmissionsTracker, OfflineEmissionsTracker + from codecarbon.output import EmissionsData +LOGGER = getLogger('energy') +POWER_UNIT = 'W' +ENERGY_UNIT = 'kWh' +Energy_Unit_Literal = Literal['kWh'] +Efficiency_Unit_Literal = Literal['samples/kWh', 'tokens/kWh', 'images/kWh'] +POWER_CONSUMPTION_SAMPLING_RATE = 1 + +@dataclass +class Energy: + unit: Energy_Unit_Literal + cpu: float + ram: float + gpu: float + total: float + + @staticmethod + def aggregate(energies: List['Energy']) -> 'Energy': + if len(energies) == 0 or all((energy is None for energy in energies)): + return None + elif any((energy is None for energy in energies)): + raise ValueError('Some energy measurements are missing') + cpu = sum((energy.cpu for energy in energies)) / len(energies) + gpu = sum((energy.gpu for energy in energies)) / len(energies) + ram = sum((energy.ram for energy in energies)) / len(energies) + total = sum((energy.total for energy in energies)) / len(energies) + return Energy(cpu=cpu, gpu=gpu, ram=ram, total=total, unit=ENERGY_UNIT) + + 
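# Note: the inference scenario above relies on this dataclass's arithmetic: __truediv__ (defined below)
# averages the tracked energy over loop iterations (generate_energy = energy_tracker.get_energy() / count)
# and __sub__ isolates the decode phase (decode_energy = generate_energy - prefill_energy) before the
# result is turned into an efficiency value via Efficiency.from_energy.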
def log(self, prefix: str='forward'): + LOGGER.info(f'\t\t+ {prefix} energy consumption:') + LOGGER.info(f'\t\t\t+ CPU: {self.cpu:f} ({self.unit})') + LOGGER.info(f'\t\t\t+ GPU: {self.gpu:f} ({self.unit})') + LOGGER.info(f'\t\t\t+ RAM: {self.ram:f} ({self.unit})') + LOGGER.info(f'\t\t\t+ total: {self.total:f} ({self.unit})') + + def __sub__(self, other: 'Energy') -> 'Energy': + if self.unit != other.unit: + raise ValueError('Energy units must match to perform subtraction') + return Energy(cpu=self.cpu - other.cpu, gpu=self.gpu - other.gpu, ram=self.ram - other.ram, total=self.total - other.total, unit=self.unit) + + def __truediv__(self, scalar: float) -> 'Energy': + return Energy(cpu=self.cpu / scalar, gpu=self.gpu / scalar, ram=self.ram / scalar, total=self.total / scalar, unit=self.unit) + +@dataclass +class Efficiency: + unit: Efficiency_Unit_Literal + value: float + + @staticmethod + def aggregate(efficiencies: List['Efficiency']) -> 'Efficiency': + if len(efficiencies) == 0: + raise ValueError('No efficiency measurements to aggregate') + elif any((efficiency is None for efficiency in efficiencies)): + raise ValueError('Some efficiency measurements are None') + unit = efficiencies[0].unit + value = sum((efficiency.value for efficiency in efficiencies)) / len(efficiencies) + return Efficiency(value=value, unit=unit) + + @staticmethod + def from_energy(energy: 'Energy', volume: int, unit: str) -> 'Efficiency': + return Efficiency(value=volume / energy.total if energy.total > 0 else 0, unit=unit) + + def log(self, prefix: str=''): + LOGGER.info(f'\t\t+ {prefix} energy efficiency: {self.value:f} ({self.unit})') + +class EnergyTracker: + + def __init__(self, backend: str, device: str, device_ids: Optional[str]=None): + self.device = device + self.backend = backend + self.device_ids = device_ids + self.is_asynchronous = backend == 'pytorch' and device == 'cuda' + self.is_distributed = is_torch_distributed_available() and torch.distributed.is_initialized() + if self.device == 'cuda': + if self.device_ids is None: + LOGGER.warning('\t+ `device=cuda` but `device_ids` not provided. Using all available CUDA devices.') + self.device_ids = get_gpu_device_ids() + self.device_ids = list(map(int, self.device_ids.split(','))) + LOGGER.info(f'\t+ Tracking GPU energy on devices {self.device_ids}') + if not is_codecarbon_available(): + raise ValueError('The library codecarbon is required to run energy benchmark, but is not installed. Please install it through `pip install codecarbon`.') + try: + self.emission_tracker = EmissionsTracker(log_level='warning', tracking_mode='machine', gpu_ids=self.device_ids, output_file='codecarbon.csv', measure_power_secs=POWER_CONSUMPTION_SAMPLING_RATE) + except Exception as e: + LOGGER.warning('\t+ Failed to initialize Online Emissions Tracker:, %s', e) + LOGGER.warning('\t+ Falling back to Offline Emissions Tracker') + if os.environ.get('COUNTRY_ISO_CODE', None) is None: + LOGGER.warning('\t+ Offline Emissions Tracker requires COUNTRY_ISO_CODE to be set. 
We will set it to USA but the carbon footprint might be inaccurate.') + self.emission_tracker = OfflineEmissionsTracker(log_level='warning', tracking_mode='machine', gpu_ids=self.device_ids, measure_power_secs=POWER_CONSUMPTION_SAMPLING_RATE, country_iso_code=os.environ.get('COUNTRY_ISO_CODE', 'USA')) + self.cpu_energy = None + self.gpu_energy = None + self.ram_energy = None + self.total_energy = None + + @contextmanager + def track(self, file_prefix: str='task'): + if self.is_distributed: + torch.distributed.barrier() + if self.is_asynchronous: + torch.cuda.synchronize() + self.emission_tracker.start_task() + yield + if self.is_distributed: + torch.distributed.barrier() + if self.is_asynchronous: + torch.cuda.synchronize() + emission_data: EmissionsData = self.emission_tracker.stop_task() + with open(f'{file_prefix}_codecarbon.json', 'w') as f: + LOGGER.info(f'\t+ Saving codecarbon emission data to {file_prefix}_codecarbon.json') + dump(asdict(emission_data), f, indent=4) + self.cpu_energy = emission_data.cpu_energy + self.gpu_energy = emission_data.gpu_energy + self.ram_energy = emission_data.ram_energy + self.total_energy = emission_data.energy_consumed + + def get_energy(self) -> Energy: + return Energy(unit=ENERGY_UNIT, cpu=self.cpu_energy, gpu=self.gpu_energy, ram=self.ram_energy, total=self.total_energy) + +# File: optimum-benchmark-main/optimum_benchmark/trackers/latency.py +import time +from contextlib import contextmanager +from dataclasses import dataclass +from logging import getLogger +from typing import List, Literal, Optional, Union +from ..import_utils import is_torch_distributed_available +if is_torch_distributed_available(): + import torch.distributed +import numpy as np +import torch +from transformers import LogitsProcessor, TrainerCallback +LOGGER = getLogger('latency') +LATENCY_UNIT = 's' +Latency_Unit_Literal = Literal['s'] +Throughput_Unit_Literal = Literal['samples/s', 'tokens/s', 'images/s', 'steps/s'] + +@dataclass +class Latency: + unit: Latency_Unit_Literal + count: int + total: float + mean: float + stdev: float + p50: float + p90: float + p95: float + p99: float + values: List[float] + + def __getitem__(self, index) -> float: + if isinstance(index, slice): + return Latency.from_values(values=self.values[index], unit=self.unit) + elif isinstance(index, int): + return Latency.from_values(values=[self.values[index]], unit=self.unit) + else: + raise ValueError(f'Invalid index type: {type(index)}, expected int or slice') + + def __sub__(self, latency: 'Latency') -> 'Latency': + latencies = [lat - latency.mean for lat in self.values] + assert not any((latency < 0 for latency in latencies)), 'Negative latency detected' + return Latency.from_values(values=latencies, unit=self.unit) + + @staticmethod + def aggregate(latencies: List['Latency']) -> 'Latency': + if len(latencies) == 0 or all((latency is None for latency in latencies)): + return None + elif any((latency is None for latency in latencies)): + raise ValueError('Some latency measurements are missing') + unit = latencies[0].unit + values = sum((lat.values for lat in latencies), []) + return Latency.from_values(values=values, unit=unit) + + @staticmethod + def from_values(values: List[float], unit: str) -> 'Latency': + return Latency(unit=unit, count=len(values), total=sum(values), mean=np.mean(values), stdev=np.std(values), p50=np.percentile(values, 50), p90=np.percentile(values, 90), p95=np.percentile(values, 95), p99=np.percentile(values, 99), values=values) + + def log(self, prefix: str=''): + 
stdev_percentage = 100 * self.stdev / self.mean if self.mean > 0 else 0 + LOGGER.info(f'\t\t+ {prefix} latency:') + LOGGER.info(f'\t\t\t- count: {self.count}') + LOGGER.info(f'\t\t\t- total: {self.total:f} {self.unit}') + LOGGER.info(f'\t\t\t- mean: {self.mean:f} {self.unit}') + LOGGER.info(f'\t\t\t- stdev: {self.stdev:f} {self.unit} ({stdev_percentage:.2f}%)') + LOGGER.info(f'\t\t\t- p50: {self.p50:f} {self.unit}') + LOGGER.info(f'\t\t\t- p90: {self.p90:f} {self.unit}') + LOGGER.info(f'\t\t\t- p95: {self.p95:f} {self.unit}') + LOGGER.info(f'\t\t\t- p99: {self.p99:f} {self.unit}') + +@dataclass +class Throughput: + unit: Throughput_Unit_Literal + value: float + + @staticmethod + def aggregate(throughputs: List['Throughput']) -> 'Throughput': + if len(throughputs) == 0: + raise ValueError('No throughput measurements to aggregate') + elif any((throughput is None for throughput in throughputs)): + raise ValueError('Some throughput measurements are missing') + unit = throughputs[0].unit + value = sum((throughput.value for throughput in throughputs)) + return Throughput(value=value, unit=unit) + + @staticmethod + def from_latency(latency: Latency, volume: int, unit: str) -> 'Throughput': + value = volume / latency.mean if latency.mean > 0 else 0 + return Throughput(value=value, unit=unit) + + def log(self, prefix: str='method'): + LOGGER.info(f'\t\t+ {prefix} throughput: {self.value:f} {self.unit}') + +class LatencyTracker: + + def __init__(self, device: str, backend: str): + self.device = device + self.backend = backend + self.is_asynchronous = self.backend == 'pytorch' and self.device == 'cuda' + self.is_distributed = is_torch_distributed_available() and torch.distributed.is_initialized() + if self.is_asynchronous: + LOGGER.info('\t+ Tracking latency using Pytorch CUDA events') + else: + LOGGER.info('\t+ Tracking latency using CPU performance counter') + self.start_time: Optional[float] = None + self.start_events: List[Union[float, torch.cuda.Event]] = [] + self.end_events: List[Union[float, torch.cuda.Event]] = [] + + def reset(self): + self.start_time = None + self.start_events = [] + self.end_events = [] + + @contextmanager + def track(self): + if self.is_distributed: + torch.distributed.barrier() + if self.is_asynchronous: + yield from self._pytorch_cuda_latency() + else: + yield from self._cpu_latency() + if self.is_distributed: + torch.distributed.barrier() + + def _pytorch_cuda_latency(self): + self.start_events.append(torch.cuda.Event(enable_timing=True)) + self.start_events[-1].record() + yield + self.end_events.append(torch.cuda.Event(enable_timing=True)) + self.end_events[-1].record() + + def _cpu_latency(self): + self.start_events.append(time.perf_counter()) + yield + self.end_events.append(time.perf_counter()) + + def get_latency(self) -> Latency: + if self.is_asynchronous: + torch.cuda.synchronize() + latencies_list = [self.start_events[i].elapsed_time(self.end_events[i]) / 1000.0 for i in range(len(self.start_events))] + else: + latencies_list = [self.end_events[i] - self.start_events[i] for i in range(len(self.start_events))] + assert not any((latency < 0 for latency in latencies_list)), 'Negative latency detected' + return Latency.from_values(latencies_list, unit=LATENCY_UNIT) + + def count(self): + assert len(self.start_events) == len(self.end_events), 'Mismatched number of start and end events, count() should only be called outside of track() context' + return len(self.start_events) + + def elapsed(self): + if self.start_time is None: + assert len(self.start_events) == 0 
and len(self.end_events) == 0, 'Number of recorded events is not zero, make sure to reset() the tracker properly' + self.start_time = time.perf_counter() + return time.perf_counter() - self.start_time + +class StepLatencyTrainerCallback(TrainerCallback): + + def __init__(self, device: str, backend: str) -> None: + self.device = device + self.backend = backend + self.is_asynchronous = self.backend == 'pytorch' and self.device == 'cuda' + self.is_distributed = is_torch_distributed_available() and torch.distributed.is_initialized() + if self.is_asynchronous: + LOGGER.info('\t+ Tracking latency using Pytorch CUDA events') + else: + LOGGER.info('\t+ Tracking latency using CPU performance counter') + self.start_events: List[Union[float, torch.cuda.Event]] = [] + self.end_events: List[Union[float, torch.cuda.Event]] = [] + + def reset(self): + self.start_events = [] + self.end_events = [] + + def on_step_begin(self, *args, **kwargs): + if self.is_asynchronous: + self.start_events.append(torch.cuda.Event(enable_timing=True)) + self.start_events[-1].record() + else: + self.start_events.append(time.perf_counter()) + + def on_step_end(self, *args, **kwargs): + if self.is_asynchronous: + self.end_events.append(torch.cuda.Event(enable_timing=True)) + self.end_events[-1].record() + else: + self.end_events.append(time.perf_counter()) + + def get_latency(self) -> Latency: + if self.is_asynchronous: + torch.cuda.synchronize() + latencies_list = [self.start_events[i].elapsed_time(self.end_events[i]) / 1000.0 for i in range(len(self.start_events))] + else: + latencies_list = [self.end_events[i] - self.start_events[i] for i in range(len(self.start_events))] + assert not any((latency < 0 for latency in latencies_list)), 'Negative latency detected' + return Latency.from_values(latencies_list, unit=LATENCY_UNIT) + +class PerTokenLatencyLogitsProcessor(LogitsProcessor): + + def __init__(self, device: str, backend: str): + self.device = device + self.backend = backend + self.is_asynchronous = self.backend == 'pytorch' and self.device == 'cuda' + self.is_distributed = is_torch_distributed_available() and torch.distributed.is_initialized() + if self.is_asynchronous: + LOGGER.info('\t+ Tracking latency using Pytorch CUDA events') + else: + LOGGER.info('\t+ Tracking latency using CPU performance counter') + self.start_time: Optional[float] = None + self.prefilled: Optional[bool] = None + self.per_token_events: List[List[Union[float, torch.cuda.Event]]] = [] + self.prefill_start_events: List[Union[float, torch.cuda.Event]] = [] + self.prefill_end_events: List[Union[float, torch.cuda.Event]] = [] + self.decode_start_events: List[Union[float, torch.cuda.Event]] = [] + self.decode_end_events: List[Union[float, torch.cuda.Event]] = [] + + def reset(self): + self.start_time = None + self.prefilled = None + self.per_token_events = [] + self.prefill_start_events = [] + self.prefill_end_events = [] + self.decode_start_events = [] + self.decode_end_events = [] + + @contextmanager + def track(self): + self.prefilled = False + self.per_token_events.append([]) + if self.is_distributed: + torch.distributed.barrier() + if self.is_asynchronous: + self.prefill_start_events.append(torch.cuda.Event(enable_timing=True)) + self.prefill_start_events[-1].record() + else: + self.prefill_start_events.append(time.perf_counter()) + yield + if self.is_asynchronous: + self.decode_end_events.append(torch.cuda.Event(enable_timing=True)) + self.decode_end_events[-1].record() + else: + self.decode_end_events.append(time.perf_counter()) + if 
self.is_distributed: + torch.distributed.barrier() + self.prefilled = False + + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor): + assert self.prefilled is not None, 'PerTokenLatencyLogitsProcessor should only be called inside of track() context' + if self.is_asynchronous: + event = torch.cuda.Event(enable_timing=True) + event.record() + else: + event = time.perf_counter() + if not self.prefilled: + self.prefill_end_events.append(event) + self.decode_start_events.append(event) + self.prefilled = True + self.per_token_events[-1].append(event) + return scores + + def get_prefill_latency(self) -> Latency: + if self.is_asynchronous: + torch.cuda.synchronize() + latencies_list = [self.prefill_start_events[i].elapsed_time(self.prefill_end_events[i]) / 1000.0 for i in range(len(self.prefill_start_events))] + else: + latencies_list = [self.prefill_end_events[i] - self.prefill_start_events[i] for i in range(len(self.prefill_start_events))] + assert not any((latency < 0 for latency in latencies_list)), 'Negative latency detected' + return Latency.from_values(latencies_list, unit=LATENCY_UNIT) + + def get_decode_latency(self) -> Latency: + if self.is_asynchronous: + torch.cuda.synchronize() + latencies_list = [self.decode_start_events[i].elapsed_time(self.decode_end_events[i]) / 1000.0 for i in range(len(self.decode_start_events))] + else: + latencies_list = [self.decode_end_events[i] - self.decode_start_events[i] for i in range(len(self.decode_start_events))] + assert not any((latency < 0 for latency in latencies_list)), 'Negative latency detected' + return Latency.from_values(latencies_list, unit=LATENCY_UNIT) + + def get_per_token_latency(self) -> Latency: + if self.is_asynchronous: + torch.cuda.synchronize() + latencies_list = [self.per_token_events[i][j].elapsed_time(self.per_token_events[i][j + 1]) / 1000.0 for i in range(len(self.per_token_events)) for j in range(0, len(self.per_token_events[i]) - 1)] + else: + latencies_list = [self.per_token_events[i][j + 1] - self.per_token_events[i][j] for i in range(len(self.per_token_events)) for j in range(0, len(self.per_token_events[i]) - 1)] + assert not any((latency < 0 for latency in latencies_list)), 'Negative latency detected' + return Latency.from_values(latencies_list, unit=LATENCY_UNIT) + + def count(self): + assert len(self.prefill_start_events) == len(self.prefill_end_events), 'Mismatched number of start and end events, count() should only be called outside of track() context' + return len(self.prefill_start_events) + + def elapsed(self): + if self.start_time is None: + assert len(self.prefill_start_events) == 0 and len(self.prefill_end_events) == 0, 'Number of recorded events is not zero, make sure to reset() the tracker properly' + self.start_time = time.perf_counter() + return time.perf_counter() - self.start_time + +# File: optimum-benchmark-main/optimum_benchmark/trackers/memory.py +import os +from contextlib import contextmanager +from dataclasses import dataclass +from logging import getLogger +from multiprocessing import Pipe, Process +from multiprocessing.connection import Connection +from typing import List, Literal, Optional +from ..import_utils import is_amdsmi_available, is_pynvml_available, is_pyrsmi_available, is_torch_available, is_torch_distributed_available +from ..system_utils import is_nvidia_system, is_rocm_system +if is_rocm_system() and is_pyrsmi_available(): + from pyrsmi import rocml +if is_torch_distributed_available(): + import torch.distributed +if is_nvidia_system() and 
is_pynvml_available(): + import pynvml +if is_rocm_system() and is_amdsmi_available(): + import amdsmi +if is_torch_available(): + import torch +import psutil +LOGGER = getLogger('memory') +MEMORY_UNIT = 'MB' +Memory_Unit_Literal = Literal['MB'] + +@dataclass +class Memory: + unit: Memory_Unit_Literal + max_ram: float + max_global_vram: Optional[float] = None + max_process_vram: Optional[float] = None + max_reserved: Optional[float] = None + max_allocated: Optional[float] = None + + @staticmethod + def aggregate(memories: List['Memory']) -> 'Memory': + if len(memories) == 0: + raise ValueError('No memory measurements to aggregate') + elif any((memory is None for memory in memories)): + raise ValueError('Some memory measurements are missing') + unit = memories[0].unit + max_ram = sum((memory.max_ram for memory in memories)) + max_process_vram = sum((memory.max_process_vram for memory in memories)) if memories[0].max_process_vram is not None else None + max_reserved = sum((memory.max_reserved for memory in memories)) if memories[0].max_reserved is not None else None + max_allocated = sum((memory.max_allocated for memory in memories)) if memories[0].max_allocated is not None else None + max_global_vram = max((memory.max_global_vram for memory in memories)) if memories[0].max_global_vram is not None else None + return Memory(unit=unit, max_ram=max_ram, max_global_vram=max_global_vram, max_process_vram=max_process_vram, max_reserved=max_reserved, max_allocated=max_allocated) + + def log(self, prefix: str=''): + LOGGER.info(f'\t\t+ {prefix} memory:') + if self.max_ram is not None: + LOGGER.info(f'\t\t\t- max RAM: {self.max_ram:f} ({self.unit})') + if self.max_global_vram is not None: + LOGGER.info(f'\t\t\t- max global VRAM: {self.max_global_vram:f} ({self.unit})') + if self.max_process_vram is not None: + LOGGER.info(f'\t\t\t- max process VRAM: {self.max_process_vram:f} ({self.unit})') + if self.max_reserved is not None: + LOGGER.info(f'\t\t\t- max reserved memory: {self.max_reserved:f} ({self.unit})') + if self.max_allocated is not None: + LOGGER.info(f'\t\t\t- max allocated memory: {self.max_allocated:f} ({self.unit})') + +class MemoryTracker: + + def __init__(self, device: str, backend: str, device_ids: Optional[str]=None): + self.device = device + self.backend = backend + self.device_ids = device_ids + self.monitored_pid = os.getpid() + self.uses_cuda_pytorch_allocator = self.device == 'cuda' and self.backend == 'pytorch' + self.is_distributed = is_torch_distributed_available() and torch.distributed.is_initialized() + LOGGER.info(f'\t+ Tracking RAM memory of process [{self.monitored_pid}]') + if self.device == 'cuda': + if self.device_ids is None: + raise ValueError('The CUDA device IDs must be provided when tracking VRAM memory.') + LOGGER.info(f'\t+ Tracking VRAM memory of CUDA devices [{self.device_ids}]') + self.device_ids = list(map(int, self.device_ids.split(','))) + if self.uses_cuda_pytorch_allocator: + self.num_pytorch_devices = torch.cuda.device_count() + if len(self.device_ids) != self.num_pytorch_devices: + raise ValueError(f"The number of target CUDA devices and Pytorch's CUDA device count do not match. 
Got {len(self.device_ids)} and {self.num_pytorch_devices} respectively.") + LOGGER.info(f'\t+ Tracking Allocated/Reserved memory of {self.num_pytorch_devices} Pytorch CUDA devices') + self.max_ram_memory = None + self.max_global_vram_memory = None + self.max_process_vram_memory = None + self.max_reserved_memory = None + self.max_allocated_memory = None + + def reset(self): + self.max_ram_memory = None + self.max_global_vram_memory = None + self.max_process_vram_memory = None + self.max_reserved_memory = None + self.max_allocated_memory = None + + @contextmanager + def track(self): + if self.is_distributed: + torch.distributed.barrier() + if self.uses_cuda_pytorch_allocator: + yield from self._cuda_pytorch_memory() + elif self.device == 'cuda': + yield from self._cuda_memory() + else: + yield from self._cpu_memory() + if self.is_distributed: + torch.distributed.barrier() + + def _cuda_pytorch_memory(self): + self.max_allocated_memory = 0 + self.max_reserved_memory = 0 + torch.cuda.synchronize() + for device in range(self.num_pytorch_devices): + try: + torch.cuda.reset_peak_memory_stats(device=device) + except Exception as e: + LOGGER.warning(f'\t\t+ Could not reset max memory stats for device {device}: {e}') + yield from self._cuda_memory() + torch.cuda.synchronize() + for device in range(self.num_pytorch_devices): + try: + self.max_allocated_memory += torch.cuda.max_memory_allocated(device=device) / 1000000.0 + self.max_reserved_memory += torch.cuda.max_memory_reserved(device=device) / 1000000.0 + except Exception as e: + LOGGER.warning(f'\t\t+ Could not get max memory stats for device {device}: {e}') + + def _cuda_memory(self): + (child_connection, parent_connection) = Pipe() + memory_process = Process(target=monitor_gpu_vram_memory, args=(self.monitored_pid, self.device_ids, child_connection), daemon=True) + memory_process.start() + parent_connection.recv() + yield from self._cpu_memory() + parent_connection.send(True) + self.max_global_vram_memory = parent_connection.recv() + self.max_process_vram_memory = parent_connection.recv() + + def _cpu_memory(self): + (child_connection, parent_connection) = Pipe() + memory_process = Process(target=monitor_cpu_ram_memory, args=(self.monitored_pid, child_connection), daemon=True) + memory_process.start() + parent_connection.recv() + yield + parent_connection.send(True) + self.max_ram_memory = parent_connection.recv() + + def get_max_memory(self): + return Memory(unit=MEMORY_UNIT, max_ram=self.max_ram_memory, max_global_vram=self.max_global_vram_memory, max_process_vram=self.max_process_vram_memory, max_reserved=self.max_reserved_memory, max_allocated=self.max_allocated_memory) + +def monitor_cpu_ram_memory(monitored_pid: int, connection: Connection, interval: float=0.001): + stop = False + max_used_memory = 0 + process = psutil.Process(monitored_pid) + connection.send(0) + while not stop: + meminfo_attr = 'memory_info' if hasattr(process, 'memory_info') else 'get_memory_info' + used_memory = getattr(process, meminfo_attr)()[0] + max_used_memory = max(max_used_memory, used_memory) + stop = connection.poll(interval) + connection.send(max_used_memory / 1000000.0) + connection.close() + +def monitor_gpu_vram_memory(monitored_pid: int, device_ids: List[int], connection: Connection, interval: float=0.01): + stop = False + max_used_global_memory = 0 + max_used_process_memory = 0 + monitored_process = psutil.Process(monitored_pid) + connection.send(0) + if is_nvidia_system(): + if not is_pynvml_available(): + raise ValueError('The library pynvml is 
required to run memory benchmark on NVIDIA GPUs, but is not installed. Please install the official and NVIDIA maintained PyNVML library through `pip install nvidia-ml-py`.') + pynvml.nvmlInit() + devices_handles = [pynvml.nvmlDeviceGetHandleByIndex(device_id) for device_id in device_ids] + while not stop: + used_global_memory = 0 + used_process_memory = 0 + monitored_pids = [monitored_pid] + [child.pid for child in monitored_process.children(recursive=True)] + for (device_id, device_handle) in zip(device_ids, devices_handles): + try: + device_processes = pynvml.nvmlDeviceGetComputeRunningProcesses(device_handle) + except Exception as e: + LOGGER.warning(f'Could not get process list for device {device_id}: {e}.') + continue + for device_process in device_processes: + if device_process.pid in monitored_pids: + used_process_memory += device_process.usedGpuMemory + try: + device_memory = pynvml.nvmlDeviceGetMemoryInfo(device_handle) + except Exception as e: + LOGGER.warning(f'Could not get memory info for device {device_id}: {e}.') + continue + used_global_memory += device_memory.used + max_used_global_memory = max(max_used_global_memory, used_global_memory) + max_used_process_memory = max(max_used_process_memory, used_process_memory) + stop = connection.poll(interval) + pynvml.nvmlShutdown() + elif is_rocm_system(): + if not is_amdsmi_available(): + raise ValueError('The library AMD SMI is required to track process-specific memory benchmark on AMD GPUs, but is not installed. Please install the official and AMD maintained AMD SMI library from https://github.com/ROCm/amdsmi.') + if not is_pyrsmi_available(): + raise ValueError('The library PyRSMI is required to track global-device memory benchmark on AMD GPUs, but is not installed. Please install the official and AMD maintained PyRSMI library from https://github.com/ROCm/pyrsmi.') + amdsmi.amdsmi_init() + rocml.smi_initialize() + permission_denied = False + devices_handles = amdsmi.amdsmi_get_processor_handles() + while not stop: + used_global_memory = 0 + used_process_memory = 0 + monitored_pids = [monitored_pid] + [child.pid for child in monitored_process.children(recursive=True)] + for device_id in device_ids: + device_handle = devices_handles[device_id] + try: + if is_amdsmi_available(): + used_global_memory += amdsmi.amdsmi_get_gpu_memory_total(device_handle, mem_type=amdsmi.AmdSmiMemoryType.VRAM) + elif is_pyrsmi_available(): + used_global_memory += rocml.smi_get_device_memory_used(device_id, type='VRAM') + except Exception as e: + LOGGER.warning(f'Could not get memory usage for device {device_id}: {e}') + if permission_denied: + continue + try: + processes_handles = amdsmi.amdsmi_get_gpu_process_list(device_handle) + except Exception as e: + LOGGER.warning(f'Could not get process list for device {device_id}: {e}') + permission_denied = 'Permission Denied' in str(e) + continue + for process_handle in processes_handles: + try: + gpu_process_info = amdsmi.amdsmi_get_gpu_process_info(device_handle, process_handle) + except Exception as e: + LOGGER.warning(f'Could not get process info for process {process_handle}: {e}') + permission_denied = 'Permission Denied' in str(e) + continue + if gpu_process_info['pid'] in monitored_pids: + used_process_memory += gpu_process_info['memory_usage']['vram_mem'] + max_used_global_memory = max(max_used_global_memory, used_global_memory) + max_used_process_memory = max(max_used_process_memory, used_process_memory) + stop = connection.poll(interval) + amdsmi.amdsmi_shut_down() + rocml.smi_shutdown() + 
else: + raise ValueError('Only NVIDIA and AMD ROCm GPUs are supported for VRAM tracking.') + connection.send(max_used_global_memory / 1000000.0) + connection.send(max_used_process_memory / 1000000.0) + connection.close() +
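The trackers above are the building blocks that the inference and training scenarios compose. As a rough illustration of the intended call pattern, here is a minimal sketch that measures latency and throughput of an arbitrary callable on CPU; it assumes optimum-benchmark (and its torch dependency) is installed, and the `workload` function, the `duration`/`iterations` budgets and the `batch_size` volume are stand-ins introduced here for illustration only, not part of the repository.

# Hedged usage sketch: exercise LatencyTracker / Throughput the same way the scenarios above do,
# but on a stand-in workload instead of a backend method.
import time

from optimum_benchmark.trackers import LatencyTracker, Throughput  # re-exported by trackers/__init__.py


def workload():
    time.sleep(0.01)  # placeholder for e.g. backend.forward(inputs, kwargs)


# device="cpu" selects the time.perf_counter path; backend="pytorch" with device="cuda" would use CUDA events.
tracker = LatencyTracker(device="cpu", backend="pytorch")

duration, iterations, batch_size = 2.0, 10, 1  # illustrative budgets
# Same loop shape as the scenarios above: run until both the time and the iteration budgets are exhausted.
while tracker.elapsed() < duration or tracker.count() < iterations:
    with tracker.track():
        workload()

latency = tracker.get_latency()  # Latency with count/total/mean/stdev and p50/p90/p95/p99
throughput = Throughput.from_latency(latency, volume=batch_size, unit="samples/s")
print(f"mean latency: {latency.mean:.4f} s, throughput: {throughput.value:.2f} samples/s")

# EnergyTracker (its track() context manager plus get_energy()/Efficiency.from_energy) and
# MemoryTracker (track() plus get_max_memory()) follow the same pattern.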