Fancy-MLLM committed on
Commit
e547e36
·
1 Parent(s): 0663680

Add application file

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -1,13 +1,13 @@
1
  import gradio as gr
2
- from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
3
  from qwen_vl_utils import process_vision_info
4
  import torch
5
 
6
  # Specify the local cache path for models
7
- local_path = "/root/.cache/huggingface/hub/models--Qwen--Qwen2-VL-7B-Instruct/snapshots/a28a094eb66a9f2ac70eef346f040d8a79977472"
8
 
9
  # Load model and processor
10
- model = Qwen2VLForConditionalGeneration.from_pretrained(
11
  local_path, torch_dtype="auto", device_map="auto"
12
  )
13
 
 
1
  import gradio as gr
2
+ from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
3
  from qwen_vl_utils import process_vision_info
4
  import torch
5
 
6
  # Specify the local cache path for models
7
+ local_path = "huggingface/Qwen/Qwen2.5-VL-7B-Instruct"
8
 
9
  # Load model and processor
10
+ model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
11
  local_path, torch_dtype="auto", device_map="auto"
12
  )
13