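"""Gradio app: generate image descriptions with microsoft/Phi-3-mini-4k-instruct
plus a LoRA adapter, running entirely on CPU. CIFAR-10 test images serve as
clickable examples in the UI."""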
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
from PIL import Image
import torchvision.datasets as datasets

def load_model(model_id):
    """Load the Phi-3 base model on CPU and attach the LoRA adapter."""
    base_model_id = "microsoft/Phi-3-mini-4k-instruct"

    # Load the tokenizer from the adapter repo (model_id), not the base model
    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)

    # Ensure the tokenizer has a padding token
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    # Load the base model for CPU
    base_model = AutoModelForCausalLM.from_pretrained(
        base_model_id,
        torch_dtype=torch.float32,  # use float32 for CPU
        device_map="cpu",           # force CPU
        trust_remote_code=True,
        low_cpu_mem_usage=True,     # enable memory optimization
    )

    # Load the LoRA adapter on top of the base model
    model = PeftModel.from_pretrained(
        base_model,
        model_id,
        device_map="cpu",  # force CPU
    )
    return model, tokenizer
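
# Optional variant (not used above): PEFT's merge_and_unload() folds the LoRA
# weights into the base model once at load time, which can speed up CPU
# inference at the cost of extra load-time memory, e.g.:
#   model = PeftModel.from_pretrained(base_model, model_id).merge_and_unload()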

def generate_description(image, model, tokenizer, max_length=100, temperature=0.7, top_p=0.9):
    """Generate a description for a PIL image using the fine-tuned model."""
    # Convert to RGB and resize to 32x32 (CIFAR-10 resolution)
    if image.mode != "RGB":
        image = image.convert("RGB")
    image = image.resize((32, 32))

    # Format the input text. Note that "[IMAGE]" is a literal placeholder string;
    # the image pixels themselves are never passed to this text-only model.
    input_text = """Below is an image. Please describe it in detail.
Image: [IMAGE]
Description: """

    # Tokenize the prompt
    inputs = tokenizer(input_text, return_tensors="pt").to(model.device)

    # Generate the response
    with torch.no_grad():
        outputs = model.generate(
            input_ids=inputs["input_ids"],            # explicitly pass input_ids
            attention_mask=inputs["attention_mask"],  # and the attention mask
            max_new_tokens=max_length,
            temperature=temperature,
            top_p=top_p,
            do_sample=True,
            pad_token_id=tokenizer.pad_token_id,
            bos_token_id=tokenizer.bos_token_id,
            eos_token_id=tokenizer.eos_token_id,
            use_cache=True,                # enable KV caching
            return_dict_in_generate=True,  # return a dict-like output
            output_scores=True,            # include per-step scores
        )

    # Decode and return only the text after the "Description:" marker
    generated_text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=True)
    return generated_text.split("Description: ")[-1].strip()
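
# Quick local sanity check (hypothetical image path; assumes the adapter repo
# is reachable):
#   model, tokenizer = load_model("jatingocodeo/phi-vlm")
#   print(generate_description(Image.open("example.png"), model, tokenizer))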

def create_demo(model_id):
    # Load model and tokenizer
    model, tokenizer = load_model(model_id)

    # Collect one CIFAR-10 test image per class to use as examples
    # (downloads the dataset to ./data on first run)
    cifar10_test = datasets.CIFAR10(root='./data', train=False, download=True)
    examples = []
    used_classes = set()
    for idx in range(len(cifar10_test)):
        img, label = cifar10_test[idx]
        class_name = cifar10_test.classes[label]
        if class_name not in used_classes:
            examples.append(img)
            used_classes.add(class_name)
        if len(used_classes) == 10:
            break

    # Define the interface function
    def process_image(image, max_length, temperature, top_p):
        try:
            return generate_description(
                image,
                model,
                tokenizer,
                max_length=max_length,
                temperature=temperature,
                top_p=top_p,
            )
        except Exception as e:
            return f"Error generating description: {str(e)}"
    # Create the interface
    demo = gr.Interface(
        fn=process_image,
        inputs=[
            gr.Image(type="pil", label="Input Image"),
            gr.Slider(
                minimum=50,
                maximum=200,
                value=100,
                step=10,
                label="Maximum Length"
            ),
            gr.Slider(
                minimum=0.1,
                maximum=1.0,
                value=0.7,
                step=0.1,
                label="Temperature"
            ),
            gr.Slider(
                minimum=0.1,
                maximum=1.0,
                value=0.9,
                step=0.1,
                label="Top P"
            )
        ],
        outputs=gr.Textbox(label="Generated Description", lines=5),
        title="Image Description Generator",
        description="""This model generates detailed descriptions of images.
You can adjust the generation parameters:
- **Maximum Length**: Controls the length of the generated description
- **Temperature**: Higher values make the description more creative
- **Top P**: Controls the randomness in word selection
""",
        # Each example row must supply one value per input component,
        # so the slider defaults are included alongside each image
        examples=[[ex, 100, 0.7, 0.9] for ex in examples]
    )
    return demo

if __name__ == "__main__":
    # Use your model ID
    model_id = "jatingocodeo/phi-vlm"
    demo = create_demo(model_id)
    demo.launch()
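    # Note: launch(share=True) would additionally create a temporary public URL
    # when running locally; on Spaces, plain launch() is sufficient.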