import gradio as gr
import cv2
import numpy as np
import torch
from skimage.metrics import structural_similarity as ssim
from transformers import BertModel, BertTokenizer

# Load a plain BERT encoder for text embeddings. BertForSequenceClassification
# returns classification logits, not the pooled sentence embedding used below.
model = BertModel.from_pretrained("bert-base-uncased")
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model.eval()


def calculate_ssim(img1, img2):
    # Resize the second image to match the first; skimage's ssim requires
    # arrays of identical shape.
    img2_resized = cv2.resize(img2, (img1.shape[1], img1.shape[0]))
    # Gradio supplies PIL images, so the arrays are RGB rather than OpenCV's BGR.
    img1_gray = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)
    img2_gray = cv2.cvtColor(img2_resized, cv2.COLOR_RGB2GRAY)
    return ssim(img1_gray, img2_gray)


def calculate_text_similarity(text1, text2):
    encoded_text1 = tokenizer(text1, truncation=True, padding=True, return_tensors="pt")
    encoded_text2 = tokenizer(text2, truncation=True, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs_text1 = model(**encoded_text1)
        outputs_text2 = model(**encoded_text2)

    # pooler_output is the transformed [CLS] embedding with shape (1, 768).
    embeddings_text1 = outputs_text1.pooler_output.squeeze(0)
    embeddings_text2 = outputs_text2.pooler_output.squeeze(0)

    # SSIM is an image metric and is not defined for 1-D vectors; cosine
    # similarity is the standard way to compare embeddings.
    text_similarity = torch.nn.functional.cosine_similarity(
        embeddings_text1, embeddings_text2, dim=0
    ).item()

    return text_similarity
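

# Note: mean pooling over the token embeddings (e.g.
# outputs_text1.last_hidden_state.mean(dim=1)) is a common alternative to
# pooler_output and often tracks sentence similarity more closely.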


def calculate_color_similarity(img1, img2):
    img1_hsv = cv2.cvtColor(img1, cv2.COLOR_RGB2HSV)
    img2_hsv = cv2.cvtColor(img2, cv2.COLOR_RGB2HSV)
    # 2-D hue/saturation histograms (the value channel is skipped to reduce
    # sensitivity to lighting); HISTCMP_CORREL yields 1.0 for identical
    # distributions and values near zero for unrelated ones.
    hist1 = cv2.calcHist([img1_hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
    hist2 = cv2.calcHist([img2_hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
    color_similarity = cv2.compareHist(hist1, hist2, cv2.HISTCMP_CORREL)
    return color_similarity


def compare_trademarks(trademark1, trademark2):
    img1 = np.array(trademark1)
    img2 = np.array(trademark2)

    # Bring both images to a common size before any pixel-level comparison.
    if img1.shape != img2.shape:
        img2 = cv2.resize(img2, (img1.shape[1], img1.shape[0]))

    ssim_score = calculate_ssim(img1, img2)

    # Placeholder strings; in a real pipeline these would come from OCR on the
    # images or from the trademark filings themselves.
    text1 = "Trademark text 1"
    text2 = "Trademark text 2"

    text_similarity = calculate_text_similarity(text1, text2)

    color_similarity = calculate_color_similarity(img1, img2)

    return ssim_score, text_similarity, color_similarity


def prevent_trademark_conflict(trademark1, trademark2):
    ssim_score, text_similarity, color_similarity = compare_trademarks(trademark1, trademark2)
    # Format the three scores as one string to match the single "text" output.
    return (
        f"SSIM: {ssim_score:.4f}\n"
        f"Text similarity: {text_similarity:.4f}\n"
        f"Color similarity: {color_similarity:.4f}"
    )
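

# A production checker would likely fold these scores into a single risk index
# with tuned weights and flag pairs above a threshold, e.g.
#   risk = 0.4 * ssim_score + 0.3 * text_similarity + 0.3 * color_similarity
# (the 0.4/0.3/0.3 split is purely illustrative).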


trademark_comparison_interface = gr.Interface(
    fn=prevent_trademark_conflict,
    inputs=[
        # gr.inputs.Image was removed in Gradio 4; gr.Image is the current API.
        gr.Image(type="pil", label="Trademark Image 1"),
        gr.Image(type="pil", label="Trademark Image 2"),
    ],
    outputs="text",
    title="Trademark Comparison",
    description="Compare two trademarks based on SSIM, text similarity, and color similarity.",
)

if __name__ == "__main__":
    trademark_comparison_interface.launch()
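
# Minimal sketch of a programmatic call, assuming two hypothetical local files
# "logo_a.png" and "logo_b.png" (converted to RGB so the arrays are 3-channel):
#
#   from PIL import Image
#   report = prevent_trademark_conflict(
#       Image.open("logo_a.png").convert("RGB"),
#       Image.open("logo_b.png").convert("RGB"),
#   )
#   print(report)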