# product-catalog / app.py
import gradio as gr
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize, InterpolationMode
from transformers import CLIPProcessor, CLIPModel
import torch
# Load the pre-trained CLIP model and its processor.
model_name = "openai/clip-vit-base-patch32"
processor = CLIPProcessor.from_pretrained(model_name)
model = CLIPModel.from_pretrained(model_name)
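# NOTE: `deepfashion_database` (used in process_query below) is not defined
# anywhere in this file. The list below is a hypothetical placeholder so the
# script runs end to end; the paths and descriptions are illustrative, and the
# real Space would load the actual product catalog here instead.
deepfashion_database = [
    {"image_path": "images/denim_jacket.jpg", "description": "Blue denim jacket with button front"},
    {"image_path": "images/floral_dress.jpg", "description": "Floral print summer dress"},
    {"image_path": "images/black_tee.jpg", "description": "Black cotton crew-neck t-shirt"},
]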
# Define the preprocessing function for images.
def preprocess_image(image):
    # Standard CLIP preprocessing: bicubic resize, 224x224 center crop, and
    # normalization with the CLIP training statistics.
    preprocess = Compose([
        Resize(256, interpolation=InterpolationMode.BICUBIC),
        CenterCrop(224),
        ToTensor(),
        Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
    ])
    return preprocess(image).unsqueeze(0)  # add a batch dimension
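# Note: CLIPProcessor can perform equivalent image preprocessing itself; the
# manual transform above just makes the steps explicit. Equivalent one-liner:
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values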
# Define a function that scores every product in the catalog against the
# image and text queries.
def process_query(image, text):
    # Encode the query image and text once, then L2-normalize the embeddings
    # so the dot products below are cosine similarities.
    image_tensor = preprocess_image(image)
    text_inputs = processor(text=text, return_tensors="pt", padding=True, truncation=True)
    with torch.no_grad():
        query_image_emb = model.get_image_features(pixel_values=image_tensor)
        query_text_emb = model.get_text_features(**text_inputs)
    query_image_emb = query_image_emb / query_image_emb.norm(dim=-1, keepdim=True)
    query_text_emb = query_text_emb / query_text_emb.norm(dim=-1, keepdim=True)
    # Calculate the similarity scores for each product in the dataset. Raw
    # cosine similarity is used rather than a softmax, since a softmax over a
    # single image-text pair always returns 1.0.
    product_scores = []
    for product in deepfashion_database:
        # Preprocess the product image and embed it
        product_image = Image.open(product["image_path"]).convert("RGB")
        with torch.no_grad():
            product_emb = model.get_image_features(pixel_values=preprocess_image(product_image))
        product_emb = product_emb / product_emb.norm(dim=-1, keepdim=True)
        # Average the product image's similarity to both query modalities
        similarity = ((query_image_emb @ product_emb.T).item() + (query_text_emb @ product_emb.T).item()) / 2
        product_scores.append((product, similarity))
    # Sort products by similarity and return the top 3 matches
    top_3_products = sorted(product_scores, key=lambda x: x[1], reverse=True)[:3]
    return '\n'.join(f"{product['description']} (score: {score:.2f})" for product, score in top_3_products)
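# Example query against the placeholder catalog above (assumes a local file
# "query.jpg"; both names are illustrative):
#   print(process_query(Image.open("query.jpg").convert("RGB"), "blue denim jacket"))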
# Build the Gradio UI. type="pil" makes Gradio pass the uploaded image as a
# PIL image, which is what preprocess_image expects.
iface = gr.Interface(
    fn=process_query,
    inputs=[
        gr.Image(type="pil", label="Image Query"),
        gr.Textbox(lines=3, label="Text Query"),
    ],
    outputs=gr.Textbox(label="Top 3 Matches"),
    title="Product Search",
    description="Find the best matching products based on image and text queries.",
)
iface.launch()