paligemma_ft_v1 / app.py
triphuong57's picture
Create app.py
2daf177 verified
raw
history blame
1.19 kB
import gradio as gr
from transformers import AutoProcessor, PaliGemmaForConditionalGeneration, BitsAndBytesConfig
from peft import PeftModel
import spaces
import torch
from huggingface_hub.hf_api import HfFolder
import os
# --- Hub authentication and model setup (runs once, at import time) ---
# Read the Hugging Face access token from the Space secret named 'token'.
# NOTE(review): os.getenv returns None when the variable is unset, and
# HfFolder.save_token(None) would then persist an invalid token — confirm the
# secret is always configured for this Space.
token = os.getenv('token')
HfFolder.save_token(token)
# Intended inference device for the @spaces.GPU-decorated handler.
# NOTE(review): `device` is never used below — the model is not moved with
# .to(device), so generation runs wherever from_pretrained placed it
# (CPU by default) — verify this is intended.
device = "cuda"
# Download (or load from cache) the PaliGemma 3B mix checkpoint and its
# matching processor; both are module-level globals consumed by greet().
model = PaliGemmaForConditionalGeneration.from_pretrained("google/paligemma-3b-mix-224")
processor = AutoProcessor.from_pretrained("google/paligemma-3b-mix-224")
@spaces.GPU(duration=120)
def greet(image, prompt):
    """Run PaliGemma on an uploaded image with a text prompt.

    Parameters
    ----------
    image : PIL.Image.Image
        Image from the Gradio image component (``type="pil"``).
    prompt : str
        Task prompt, e.g. ``"caption en"``.

    Returns
    -------
    str
        The decoded model completion, with the prompt tokens stripped.
    """
    # Bug fix: the original passed the undefined name `raw_image` here
    # (NameError at runtime); the function parameter is `image`.
    inputs = processor(prompt, image, return_tensors="pt")
    # generate() echoes the prompt tokens before the completion; remember the
    # prompt length so we can return only the newly generated text.
    input_len = inputs["input_ids"].shape[-1]
    output = model.generate(**inputs, max_new_tokens=20)
    # Decode to a plain string so the Gradio "text" output shows readable
    # text instead of the repr of a token-id tensor.
    return processor.decode(output[0][input_len:], skip_special_tokens=True)
# Gradio UI: an image input (upload or webcam) plus a free-text prompt,
# wired to greet(); the returned string is rendered in a text output.
image_input = gr.Image(label="Upload image", sources=['upload', 'webcam'], type="pil")
prompt_input = gr.Text()
demo = gr.Interface(fn=greet, inputs=[image_input, prompt_input], outputs="text")
demo.launch()