Prasi21 commited on
Commit
1aad204
·
verified ·
1 Parent(s): a660aaa

Update: added quantization config

Browse files
Files changed (1) hide show
  1. app.py +3 -1
app.py CHANGED
@@ -1,12 +1,14 @@
1
  import torch
2
  import gradio as gr
3
- from transformers import AutoProcessor, Blip2ForConditionalGeneration
4
  from peft import LoraConfig, get_peft_model, PeftModel
5
 
 
6
  # Load the processor
7
  processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
8
 
9
  # Load the base model from the original repository
 
10
  base_model = Blip2ForConditionalGeneration.from_pretrained(
11
  "ybelkada/blip2-opt-2.7b-fp16-sharded",
12
  device_map="auto",
 
1
  import torch
2
  import gradio as gr
3
+ from transformers import AutoProcessor, Blip2ForConditionalGeneration, BitsAndBytesConfig
4
  from peft import LoraConfig, get_peft_model, PeftModel
5
 
6
+
7
  # Load the processor
8
  processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
9
 
10
  # Load the base model from the original repository
11
+ quantization_config = BitsAndBytesConfig(load_in_8bit=True)
12
  base_model = Blip2ForConditionalGeneration.from_pretrained(
13
  "ybelkada/blip2-opt-2.7b-fp16-sharded",
14
  device_map="auto",