sprakhil committed on
Commit
45ab2ce
·
1 Parent(s): f86a1bd
Files changed (1) hide show
  1. app.py +6 -7
app.py CHANGED
@@ -1,7 +1,7 @@
1
  import streamlit as st
2
  from PIL import Image
3
  import torch
4
- from transformers import Qwen2VLForConditionalGeneration, AutoProcessor, AutoModelForImageToText
5
  from colpali_engine.models import ColPali, ColPaliProcessor
6
  import os
7
 
@@ -9,22 +9,21 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
9
 
10
  hf_token = os.getenv('HF_TOKEN')
11
  try:
12
- processor = AutoProcessor.from_pretrained("google/paligemma-3b-mix-448", use_auth_token=hf_token)
13
- model = AutoModelForImageToText.from_pretrained("google/paligemma-3b-mix-448", use_auth_token=hf_token)
14
  except Exception as e:
15
  st.error(f"Error loading image-to-text model: {e}")
16
  st.stop()
17
 
18
  try:
19
- model_colpali = ColPali.from_pretrained("vidore/colpali-v1.2", torch_dtype=torch.bfloat16, use_auth_token=hf_token).to(device)
20
- processor_colpali = ColPaliProcessor.from_pretrained("google/paligemma-3b-mix-448", use_auth_token=hf_token)
21
  except Exception as e:
22
  st.error(f"Error loading ColPali model or processor: {e}")
23
  st.stop()
24
 
25
  try:
26
- model_qwen = Qwen2VLForConditionalGeneration.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", use_auth_token=hf_token).to(device)
27
- processor_qwen = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", use_auth_token=hf_token)
28
  except Exception as e:
29
  st.error(f"Error loading Qwen model or processor: {e}")
30
  st.stop()
 
import streamlit as st
from PIL import Image
import torch
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor, pipeline
from colpali_engine.models import ColPali, ColPaliProcessor
import os

# Run on GPU when one is available; every model below is moved to this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Hugging Face access token. Needed for gated checkpoints (e.g. PaliGemma);
# None is acceptable when the checkpoints are public or already cached.
hf_token = os.getenv('HF_TOKEN')

try:
    # `use_auth_token` is deprecated in recent transformers releases;
    # `token` is the supported keyword and accepts None.
    model = pipeline("image-to-text", model="google/paligemma-3b-mix-448", token=hf_token)
except Exception as e:
    st.error(f"Error loading image-to-text model: {e}")
    st.stop()

try:
    # bfloat16 halves the ColPali retriever's memory footprint versus fp32.
    # Pass the token consistently: the PaliGemma processor repo is gated,
    # so loading it without credentials fails for users relying on HF_TOKEN.
    model_colpali = ColPali.from_pretrained(
        "vidore/colpali-v1.2", torch_dtype=torch.bfloat16, token=hf_token
    ).to(device)
    processor_colpali = ColPaliProcessor.from_pretrained(
        "google/paligemma-3b-mix-448", token=hf_token
    )
except Exception as e:
    st.error(f"Error loading ColPali model or processor: {e}")
    st.stop()

try:
    model_qwen = Qwen2VLForConditionalGeneration.from_pretrained(
        "Qwen/Qwen2-VL-7B-Instruct", token=hf_token
    ).to(device)
    processor_qwen = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", token=hf_token)
except Exception as e:
    st.error(f"Error loading Qwen model or processor: {e}")
    st.stop()