lixinhao committed
Commit 104d682 · verified · 1 parent: f5a1738

Update README.md

Files changed (1)
  1. README.md +2 -1
README.md CHANGED
@@ -107,12 +107,13 @@ pip install flash-attn --no-build-isolation
 Then you could use our model:
 ```python
 from transformers import AutoModel, AutoTokenizer
+import torch
 
 # model setting
 model_path = 'OpenGVLab/VideoChat-Flash-Qwen2_5-7B_InternVideo2-1B'
 
 tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
-model = AutoModel.from_pretrained(model_path, trust_remote_code=True).half().cuda()
+model = AutoModel.from_pretrained(model_path, trust_remote_code=True).to(torch.bfloat16).cuda()
 image_processor = model.get_vision_tower().image_processor
 
 mm_llm_compress = False # use the global compress or not
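
For readers applying this change by hand, here is a minimal standalone sketch of the updated loading code. The bf16-support check and float16 fallback are assumptions added for illustration (bfloat16 requires an Ampere-or-newer GPU); they are not part of the commit, which uses `torch.bfloat16` unconditionally.

```python
# Minimal sketch of the updated loading snippet. The dtype fallback below is
# an assumption for illustration; the commit itself always uses bfloat16.
import torch
from transformers import AutoModel, AutoTokenizer

model_path = 'OpenGVLab/VideoChat-Flash-Qwen2_5-7B_InternVideo2-1B'

# bfloat16 keeps float32's exponent range, so it is less prone to overflow
# than float16 (what .half() gives), but it needs an Ampere-or-newer GPU.
dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModel.from_pretrained(model_path, trust_remote_code=True).to(dtype).cuda()
image_processor = model.get_vision_tower().image_processor
```

The substance of the commit is the switch from `.half()` (float16) to `.to(torch.bfloat16)`: both are 16-bit formats with the same memory footprint, but bfloat16 trades mantissa precision for float32's dynamic range.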