# phi-vlm/app.py
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
from PIL import Image
import torchvision.datasets as datasets


def load_model(model_id):
    """Load the Phi-3 base model and attach the LoRA adapter from model_id."""
    base_model_id = "microsoft/Phi-3-mini-4k-instruct"

    # Load the tokenizer from the adapter repo
    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
    # Ensure the tokenizer has a padding token
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    # Load the base model in float16 to reduce memory use
    base_model = AutoModelForCausalLM.from_pretrained(
        base_model_id,
        torch_dtype=torch.float16,
        device_map="auto",
        trust_remote_code=True,
    )

    # Load the LoRA adapter on top of the base model
    model = PeftModel.from_pretrained(base_model, model_id)
    return model, tokenizer
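
# Optional (a sketch, not part of the original app): for faster inference the
# LoRA weights can be folded into the base model with PEFT's merge_and_unload(),
# which returns a plain transformers model with the adapter merged in:
#
#     merged = model.merge_and_unload()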


def generate_description(image, model, tokenizer, max_length=100, temperature=0.7, top_p=0.9):
    """Generate a text description for a PIL image."""
    # Convert to RGB and resize to the 32x32 resolution used in training (CIFAR-10)
    if image.mode != "RGB":
        image = image.convert("RGB")
    image = image.resize((32, 32))

    # Format the input prompt. Note: the resized pixels are not fed to the
    # language model here; the prompt carries only the [IMAGE] placeholder.
    input_text = """Below is an image. Please describe it in detail.
Image: [IMAGE]
Description: """

    # Tokenize the prompt and move it to the model's device
    inputs = tokenizer(input_text, return_tensors="pt").to(model.device)

    # Generate a sampled response
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=max_length,
            temperature=temperature,
            top_p=top_p,
            num_return_sequences=1,
            pad_token_id=tokenizer.eos_token_id,
            do_sample=True,
        )

    # Decode and return only the text after the "Description: " marker
    full_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return full_text.split("Description: ")[-1].strip()
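
# Example usage outside Gradio (a sketch; "example.jpg" is a hypothetical file):
#
#     model, tokenizer = load_model("jatingocodeo/phi-vlm")
#     image = Image.open("example.jpg")
#     print(generate_description(image, model, tokenizer, max_length=120))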


def create_demo(model_id):
    # Load model and tokenizer
    model, tokenizer = load_model(model_id)

    # Collect one example image per CIFAR-10 class
    cifar10_test = datasets.CIFAR10(root='./data', train=False, download=True)
    examples = []
    used_classes = set()
    for idx in range(len(cifar10_test)):
        img, label = cifar10_test[idx]
        class_name = cifar10_test.classes[label]
        if class_name not in used_classes:
            examples.append(img)
            used_classes.add(class_name)
        if len(used_classes) == 10:
            break

    # Interface callback: wrap generation so errors surface in the output box
    def process_image(image, max_length, temperature, top_p):
        try:
            return generate_description(
                image,
                model,
                tokenizer,
                max_length=max_length,
                temperature=temperature,
                top_p=top_p,
            )
        except Exception as e:
            return f"Error generating description: {str(e)}"

    # Build the Gradio interface
    demo = gr.Interface(
        fn=process_image,
        inputs=[
            gr.Image(type="pil", label="Input Image"),
            gr.Slider(
                minimum=50,
                maximum=200,
                value=100,
                step=10,
                label="Maximum Length",
            ),
            gr.Slider(
                minimum=0.1,
                maximum=1.0,
                value=0.7,
                step=0.1,
                label="Temperature",
            ),
            gr.Slider(
                minimum=0.1,
                maximum=1.0,
                value=0.9,
                step=0.1,
                label="Top P",
            ),
        ],
        outputs=gr.Textbox(label="Generated Description", lines=5),
        title="Image Description Generator",
        description="""This model generates detailed descriptions of images.
You can adjust the generation parameters:
- **Maximum Length**: Controls the length of the generated description
- **Temperature**: Higher values make the description more creative
- **Top P**: Controls the randomness in word selection
""",
        examples=[[ex] for ex in examples],
    )
    return demo


if __name__ == "__main__":
    # Hugging Face model ID of the LoRA adapter repo
    model_id = "jatingocodeo/phi-vlm"
    demo = create_demo(model_id)
    demo.launch()
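
    # When running locally, Gradio can also expose a temporary public URL
    # (an optional feature, not used by the original Space):
    #
    #     demo.launch(share=True)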