iamrobotbear committed on
Commit
54ddd45
·
1 Parent(s): 87b83c0

Can I also try this with blip2-flan-t5-xxl?

Browse files
Files changed (1) hide show
  1. app.py +12 -2
app.py CHANGED
@@ -6,11 +6,21 @@ from PIL import Image
6
  # Check for GPU availability and set the device variable accordingly
7
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
8
 
 
9
  # Load the BLIP-2 model and processor
10
- processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
11
  # Load model in int8 using bitsandbytes, and pass device_map='auto'
12
  model = Blip2ForConditionalGeneration.from_pretrained(
13
- "Salesforce/blip2-opt-2.7b", load_in_8bit=True, device_map='auto'
 
 
 
 
 
 
 
 
 
14
  )
15
 
16
  def blip2_interface(image, prompted_caption_text, vqa_question, chat_context):
 
6
  # Check for GPU availability and set the device variable accordingly
7
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
8
 
9
+
10
  # Load the BLIP-2 model and processor
11
+ processor = AutoProcessor.from_pretrained("Salesforce/blip2-flan-t5-xxl")
12
  # Load model in int8 using bitsandbytes, and pass device_map='auto'
13
  model = Blip2ForConditionalGeneration.from_pretrained(
14
+ "Salesforce/blip2-flan-t5-xxl", load_in_8bit=True, device_map='auto'
15
+ )
16
+
17
+ # Uncomment lines 20, 22, 23 to begin using blip2-opt-2.7b model
18
+
19
+ # Load the BLIP-2 model and processor
20
+ #processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
21
+ # Load model in int8 using bitsandbytes, and pass device_map='auto'
22
+ #model = Blip2ForConditionalGeneration.from_pretrained(
23
+ # "Salesforce/blip2-opt-2.7b", load_in_8bit=True, device_map='auto'
24
  )
25
 
26
  def blip2_interface(image, prompted_caption_text, vqa_question, chat_context):