import torch
import gradio as gr
from transformers import AutoProcessor, Blip2ForConditionalGeneration, BitsAndBytesConfig
from peft import PeftModel

# Run inference on the GPU when one is available
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the processor
processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")

# Quantize the weights to 8-bit so the 2.7B-parameter model fits in GPU memory
quantization_config = BitsAndBytesConfig(load_in_8bit=True)

# Load the base model from the original repository
base_model = Blip2ForConditionalGeneration.from_pretrained(
    "ybelkada/blip2-opt-2.7b-fp16-sharded",
    device_map="auto",
    quantization_config=quantization_config,
)
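
# If GPU memory is tight, a 4-bit NF4 configuration is a common alternative
# (a sketch, not part of this app; swap it in for the 8-bit config above):
#
#     quantization_config = BitsAndBytesConfig(
#         load_in_4bit=True,
#         bnb_4bit_quant_type="nf4",
#         bnb_4bit_compute_dtype=torch.float16,
#     )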
# Load the fine-tuned LoRA adapters from the Hugging Face Hub
repo_id = "Prasi21/blip2-opt-2.7b-strep-throat-caption-adapters"
model = PeftModel.from_pretrained(base_model, repo_id)
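# Switch to evaluation mode (disables dropout in the adapter layers) before serving
model.eval()
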
# Define the prediction function
def predict(image):
    # Preprocess the image and move the tensors to the inference device
    inputs = processor(images=image, return_tensors="pt").to(device, torch.float16)
    # Extra token id treated as end-of-sequence so generation stops early
    new_eos_token_id = 13
    with torch.no_grad():
        generated_ids = model.generate(
            **inputs,
            max_length=100,
            eos_token_id=new_eos_token_id,
        )
    # Decode the generated token ids into the caption text
    generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)
    return generated_caption[0]
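
# Optional local smoke test (a sketch; "sample_throat.jpg" is an illustrative
# path, not a file shipped with this app):
#
#     from PIL import Image
#     print(predict(Image.open("sample_throat.jpg")))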
# Set up the Gradio interface
demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),  # Upload an image in PIL format
    outputs=gr.Textbox(),         # The output is the generated caption
    title="Strep Throat Image Assessment",
    description="Upload an image of a throat and receive a medical assessment caption based on the model's output.",
)

# Launch the Gradio app
demo.launch()
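
# When running locally, demo.launch(share=True) also creates a temporary
# public link (a standard Gradio option; not needed on Hugging Face Spaces).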