# Trademark similarity demo: compares two trademark images via grayscale SSIM,
# BERT-based text similarity, and Lab color-space SSIM, served through Gradio.
import gradio as gr
import cv2
import numpy as np
from skimage.metrics import structural_similarity as ssim
from transformers import BertForSequenceClassification, BertTokenizer
import torch
# Load pre-trained model and tokenizer
# NOTE(review): a classification head ("bert-base-uncased" with a randomly
# initialized sequence-classification layer) is used here even though the file
# only needs sentence embeddings — presumably a plain BertModel was intended;
# confirm before swapping, since both names are referenced below.
model = BertForSequenceClassification.from_pretrained("bert-base-uncased")
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
def calculate_ssim(img1, img2):
    """Return the structural-similarity index of two BGR images.

    Parameters
    ----------
    img1, img2 : numpy.ndarray
        BGR images as produced by OpenCV. ``img2`` is resized to match
        ``img1`` because ``ssim`` requires identically shaped inputs.

    Returns
    -------
    float
        SSIM score in [-1, 1]; 1.0 means structurally identical.
    """
    img1_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    img2_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    # ssim raises on shape mismatch; uploaded trademarks rarely share a size.
    if img1_gray.shape != img2_gray.shape:
        img2_gray = cv2.resize(img2_gray, (img1_gray.shape[1], img1_gray.shape[0]))
    return ssim(img1_gray, img2_gray)
def calculate_text_similarity(text1, text2):
    """Return the cosine similarity of BERT sentence embeddings for two texts.

    The previous implementation read ``outputs.last_hidden_state`` from a
    ``BertForSequenceClassification`` call — that model returns a
    ``SequenceClassifierOutput`` with no such attribute (AttributeError) —
    and then fed two variable-length embedding matrices to image SSIM,
    which requires same-shaped inputs. Instead, hidden states are requested
    explicitly, the final layer is mean-pooled into one fixed-size vector
    per text, and the vectors are compared with cosine similarity.

    Parameters
    ----------
    text1, text2 : str
        Texts to compare.

    Returns
    -------
    float
        Cosine similarity in [-1, 1]; 1.0 means identical direction.
    """
    encoded_text1 = tokenizer(text1, truncation=True, padding=True, return_tensors="pt")
    encoded_text2 = tokenizer(text2, truncation=True, padding=True, return_tensors="pt")
    with torch.no_grad():
        outputs_text1 = model(**encoded_text1, output_hidden_states=True)
        outputs_text2 = model(**encoded_text2, output_hidden_states=True)
    # hidden_states[-1] is (1, seq_len, hidden); mean over tokens -> (hidden,)
    emb1 = outputs_text1.hidden_states[-1].mean(dim=1).squeeze(0)
    emb2 = outputs_text2.hidden_states[-1].mean(dim=1).squeeze(0)
    return torch.nn.functional.cosine_similarity(emb1, emb2, dim=0).item()
def calculate_color_similarity(img1, img2):
    """Return a color-aware SSIM score computed in the Lab color space.

    Parameters
    ----------
    img1, img2 : numpy.ndarray
        BGR images as produced by OpenCV. ``img2`` is resized to match
        ``img1`` because ``ssim`` requires identically shaped inputs.

    Returns
    -------
    float
        Mean SSIM over the L, a, b channels, in [-1, 1].
    """
    img1_lab = cv2.cvtColor(img1, cv2.COLOR_BGR2Lab)
    img2_lab = cv2.cvtColor(img2, cv2.COLOR_BGR2Lab)
    if img1_lab.shape != img2_lab.shape:
        img2_lab = cv2.resize(img2_lab, (img1_lab.shape[1], img1_lab.shape[0]))
    # `multichannel=True` was deprecated in scikit-image 0.19 and later
    # removed; `channel_axis=-1` is the current spelling of the same option.
    return ssim(img1_lab, img2_lab, channel_axis=-1)
def _to_bgr(image):
    """Convert a PIL image (or a file path) into an OpenCV BGR ndarray."""
    if isinstance(image, str):
        # Path given programmatically; cv2 reads straight to BGR.
        return cv2.imread(image)
    # Gradio delivers PIL images (inputs use type="pil"); cv2.imread cannot
    # accept those, so convert RGB -> BGR ndarray explicitly.
    return cv2.cvtColor(np.array(image.convert("RGB")), cv2.COLOR_RGB2BGR)

def compare_trademarks(trademark1, trademark2):
    """Compare two trademarks on structure, text, and color similarity.

    Parameters
    ----------
    trademark1, trademark2 : PIL.Image.Image or str
        Trademark images as delivered by the Gradio interface
        (``type="pil"``), or file paths when called programmatically.

    Returns
    -------
    tuple[float, float, float]
        ``(ssim_score, text_similarity, color_similarity)``.
    """
    img1 = _to_bgr(trademark1)
    img2 = _to_bgr(trademark2)
    ssim_score = calculate_ssim(img1, img2)
    # TODO: these are placeholder strings — wire in real trademark text
    # (e.g. OCR of the images or user-supplied wordmarks).
    text1 = "Trademark text 1"
    text2 = "Trademark text 2"
    text_similarity = calculate_text_similarity(text1, text2)
    color_similarity = calculate_color_similarity(img1, img2)
    return ssim_score, text_similarity, color_similarity
def prevent_trademark_conflict(trademark1, trademark2):
    """Gradio entry point: delegate to compare_trademarks and pass through
    its (ssim, text, color) similarity scores unchanged."""
    return compare_trademarks(trademark1, trademark2)
# Interface
trademark_comparison_interface = gr.Interface(
    fn=prevent_trademark_conflict,
    inputs=[
        # gr.inputs.* is the removed Gradio 3.x namespace; top-level
        # components work in both 3.x and 4.x.
        gr.Image(type="pil", label="Trademark Image 1"),
        gr.Image(type="pil", label="Trademark Image 2"),
    ],
    # The function returns a 3-tuple, so three output components are
    # required — a single "text" output cannot receive a tuple.
    outputs=[
        gr.Textbox(label="SSIM Score"),
        gr.Textbox(label="Text Similarity"),
        gr.Textbox(label="Color Similarity"),
    ],
    title="Trademark Comparison",
    description="Compare two trademarks based on SSIM, text similarity, and color similarity.",
)

# Launch the interface only when run as a script, not on import.
if __name__ == "__main__":
    trademark_comparison_interface.launch()