from transformers import PretrainedConfig
from typing import List
class InfMLLMConfig(PretrainedConfig):
    """Configuration for the InfMLLM multimodal model.

    Holds the vision-encoder, visual-token-pooling, and language-model
    settings consumed at model build time. Any extra keyword arguments are
    forwarded to :class:`transformers.PretrainedConfig`.

    Args:
        image_size: Input resolution for the vision tower. NOTE(review):
            stored as a string ("448") — presumably cast by the consumer;
            confirm before changing the type.
        vit_model: Identifier of the vision backbone (e.g. ``"eva_clip_g"``).
        pool_out_size: Number of pooled visual tokens; string-typed like
            ``image_size`` — TODO confirm downstream casting.
        lm_model: Path or hub id of the language model weights.
        lm_tokenizer: Path or hub id of the language-model tokenizer.
        precision: Mixed-precision mode tag (e.g. ``"amp_bf16"``).
        **kwargs: Passed through to ``PretrainedConfig.__init__``.
    """

    def __init__(
        self,
        image_size="448",
        vit_model="eva_clip_g",
        pool_out_size="32",
        lm_model="pretrain_models/lmsys/vicuna-7b-v1.5/",
        lm_tokenizer="pretrain_models/lmsys/vicuna-7b-v1.5/",
        precision="amp_bf16",
        **kwargs,
    ):
        # Record the model-specific settings in the same order the original
        # implementation assigned them.
        fields = (
            ("image_size", image_size),
            ("vit_model", vit_model),
            ("pool_out_size", pool_out_size),
            ("lm_model", lm_model),
            ("lm_tokenizer", lm_tokenizer),
            ("precision", precision),
        )
        for attr_name, attr_value in fields:
            setattr(self, attr_name, attr_value)
        # Let the HF base class consume the remaining standard kwargs last.
        super().__init__(**kwargs)