prithivMLmods committed on
Commit
fee776e
·
verified ·
1 Parent(s): 455b690

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -27,7 +27,7 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
27
 
28
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
29
 
30
- # Load Cosmos-Reason1-7B
31
  MODEL_ID_M = "Qwen/Qwen2.5-VL-7B-Instruct"
32
  processor_m = AutoProcessor.from_pretrained(MODEL_ID_M, trust_remote_code=True)
33
  model_m = Qwen2_5_VLForConditionalGeneration.from_pretrained(
@@ -36,7 +36,7 @@ model_m = Qwen2_5_VLForConditionalGeneration.from_pretrained(
36
  torch_dtype=torch.float16
37
  ).to(device).eval()
38
 
39
- # Load DocScope
40
  MODEL_ID_X = "Qwen/Qwen2.5-VL-3B-Instruct"
41
  processor_x = AutoProcessor.from_pretrained(MODEL_ID_X, trust_remote_code=True)
42
  model_x = Qwen2_5_VLForConditionalGeneration.from_pretrained(
 
27
 
28
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
29
 
30
+ # Load Qwen2.5-VL-7B-Instruct
31
  MODEL_ID_M = "Qwen/Qwen2.5-VL-7B-Instruct"
32
  processor_m = AutoProcessor.from_pretrained(MODEL_ID_M, trust_remote_code=True)
33
  model_m = Qwen2_5_VLForConditionalGeneration.from_pretrained(
 
36
  torch_dtype=torch.float16
37
  ).to(device).eval()
38
 
39
+ # Load Qwen2.5-VL-3B-Instruct
40
  MODEL_ID_X = "Qwen/Qwen2.5-VL-3B-Instruct"
41
  processor_x = AutoProcessor.from_pretrained(MODEL_ID_X, trust_remote_code=True)
42
  model_x = Qwen2_5_VLForConditionalGeneration.from_pretrained(