import gradio as gr
import cv2
import numpy as np
from skimage.metrics import structural_similarity as ssim
from transformers import BertModel, BertTokenizer
import torch


# Load a pre-trained BERT encoder and tokenizer. BertModel exposes the
# pooler_output used below; BertForSequenceClassification would only
# return classification logits and would fail here.
model = BertModel.from_pretrained("bert-base-uncased")
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")


def calculate_ssim(img1, img2):
    # Resize img2 to img1's dimensions so both arrays have the same shape.
    # (Resizing each image to the other's shape, as before, still leaves
    # mismatched shapes whenever the originals differ.)
    img2_resized = cv2.resize(img2, (img1.shape[1], img1.shape[0]))
    # Gradio delivers PIL images, which are RGB, so convert with the RGB flag.
    img1_gray = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)
    img2_gray = cv2.cvtColor(img2_resized, cv2.COLOR_RGB2GRAY)
    return ssim(img1_gray, img2_gray)
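
# Sanity check (hypothetical, not executed here): any image compared with
# itself scores an SSIM of 1.0, e.g.
#   a = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8)
#   calculate_ssim(a, a)  # -> 1.0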


def calculate_text_similarity(text1, text2):
    encoded_text1 = tokenizer(text1, truncation=True, padding=True, return_tensors="pt")
    encoded_text2 = tokenizer(text2, truncation=True, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs_text1 = model(**encoded_text1)
        outputs_text2 = model(**encoded_text2)

    embeddings_text1 = outputs_text1.pooler_output.squeeze(0)
    embeddings_text2 = outputs_text2.pooler_output.squeeze(0)

    # SSIM is an image metric and is not meaningful for 1-D embeddings;
    # cosine similarity is the standard measure for embedding vectors.
    text_similarity = torch.nn.functional.cosine_similarity(
        embeddings_text1, embeddings_text2, dim=0
    ).item()

    return text_similarity
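
# Hypothetical usage: identical strings produce a similarity of ~1.0, e.g.
#   calculate_text_similarity("ACME Corp", "ACME Corp")  # -> ~1.0
# Cosine similarity lies in [-1, 1], so any downstream threshold should
# expect that range.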


def calculate_color_similarity(img1, img2):
    # The arrays come from PIL images and are RGB, so use the RGB->HSV flag.
    img1_hsv = cv2.cvtColor(img1, cv2.COLOR_RGB2HSV)
    img2_hsv = cv2.cvtColor(img2, cv2.COLOR_RGB2HSV)
    # 2-D hue/saturation histograms; value is ignored to reduce the effect
    # of lighting differences.
    hist1 = cv2.calcHist([img1_hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
    hist2 = cv2.calcHist([img2_hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
    color_similarity = cv2.compareHist(hist1, hist2, cv2.HISTCMP_CORREL)
    return color_similarity
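
# Hypothetical usage: an image compared against itself yields 1.0, while
# unrelated color palettes drift toward 0 (or negative, since histogram
# correlation can be anti-correlated).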


def compare_trademarks(trademark1, trademark2):
    # Convert the PIL inputs to NumPy arrays, forcing 3-channel RGB in case
    # an image arrives with an alpha channel.
    img1 = np.array(trademark1.convert("RGB"))
    img2 = np.array(trademark2.convert("RGB"))

    # Resize img2 to match img1 so downstream comparisons see equal shapes.
    if img1.shape != img2.shape:
        img2 = cv2.resize(img2, (img1.shape[1], img1.shape[0]))

    ssim_score = calculate_ssim(img1, img2)

    # Placeholder strings; in practice these would come from OCR or user input.
    text1 = "Trademark text 1"
    text2 = "Trademark text 2"

    text_similarity = calculate_text_similarity(text1, text2)

    color_similarity = calculate_color_similarity(img1, img2)

    return ssim_score, text_similarity, color_similarity


def prevent_trademark_conflict(trademark1, trademark2):
    similarity_scores = compare_trademarks(trademark1, trademark2)
    return similarity_scores
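
# A caller could turn the raw scores into a go/no-go decision; the cutoffs
# below are illustrative only and would need calibration on real data:
#   ssim_score, text_sim, color_sim = prevent_trademark_conflict(im1, im2)
#   likely_conflict = ssim_score > 0.8 or color_sim > 0.9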


# Interface
trademark_comparison_interface = gr.Interface(
    fn=prevent_trademark_conflict,
    inputs=[
        gr.Image(type="pil", label="Trademark Image 1"),
        gr.Image(type="pil", label="Trademark Image 2"),
    ],
    # The function returns three scores, so declare three output components
    # (the old gr.inputs/gr.outputs namespaces are removed in current Gradio).
    outputs=[
        gr.Number(label="SSIM Score"),
        gr.Number(label="Text Similarity"),
        gr.Number(label="Color Similarity"),
    ],
    title="Trademark Comparison",
    description="Compare two trademarks based on SSIM, text similarity, and color similarity.",
)

# Launch the interface
if __name__ == "__main__":
    trademark_comparison_interface.launch()
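
# When running outside a hosted environment such as Hugging Face Spaces, a
# shareable public link can be requested with launch(share=True).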