import gradio as gr
import pandas as pd
import cv2
import numpy as np
import requests
import torch
import base64
import os
import logging
from io import BytesIO
from PIL import Image
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from extract_text import extract_text_from_image
from models import TextSimilarityRequest
from text_similarity import analyze_similarity
from starlette.responses import JSONResponse


logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
app = FastAPI()
# app.add_middleware(
#     CORSMiddleware,
#     allow_origins=["*"],
#     allow_credentials=True,
#     allow_methods=["*"],
#     allow_headers=["*"],
# )

@app.post("/text_similarity", summary="Perform image text similarity", response_model=float, tags=["Text Similarities"])
async def text_similarity(request: TextSimilarityRequest):
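    """Load the request image, run OCR on it, and return the percentage of key texts found with sufficient similarity."""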
    image_info = request.imageInfo
    key_texts = request.keyTexts
    similarity_threshold = request.similarityThreshold 
    origin_id = image_info.originId

    logging.info(f"Checking text similarity for main source with resource id {origin_id}")

    image = load_image_url(image_info.source)

    # Extract text from the image using the user's method
    gpu_available = torch.cuda.is_available()
    extracted_texts = extract_text_from_image(image, gpu_available)

    results = analyze_similarity(
        extracted_texts,
        key_texts,
        similarity_threshold=similarity_threshold / 100,  # Convert percentage to decimal
        fragment_threshold=1.0,  # Fragments must fully match the key text
    )
    
    log_similarity_report(results, origin_id)
    
    total_texts = len(key_texts)
    passed_texts = results["statistics"]["total_processed"]

    # Guard against an empty key-text list to avoid a ZeroDivisionError
    percentage_passed = (passed_texts / total_texts) * 100 if total_texts else 0.0

    logging.info(f"Text similarity for main source with resource id {origin_id} is {percentage_passed}%")

    return percentage_passed
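
# Illustrative request body for POST /text_similarity (a sketch only; the authoritative
# schema is TextSimilarityRequest in models.py, and the field names below are inferred
# from the attributes accessed above):
# {
#     "imageInfo": {"originId": "123", "source": "https://example.com/label.png"},
#     "keyTexts": ["first key text", "second key text"],
#     "similarityThreshold": 80
# }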

def log_similarity_report(results, originId):    
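    """Log a structured report of the similarity results for the given origin id."""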
    # General statistics
    logging.info(f"[{originId}] Total texts analyzed: {results['statistics']['total_analyzed']}")
    logging.info(f"[{originId}] Texts with detected similarity: {results['statistics']['total_processed']}")
    
    # Similar texts
    if results["similar_texts"]:
        logging.info(f"[{originId}] Direct Similar Texts Found: {len(results['similar_texts'])}")
        for item in results["similar_texts"]:
            logging.info(f"[{originId}] Similar Text: '{item['text']}' -> Key Text: '{item['key_text']}' with Similarity: {item['similarity']:.2%}")
    
    # Detected fragments
    if results["fragments_detected"]:
        logging.info(f"[{originId}] Fragments Detected: {len(results['fragments_detected'])}")
        for item in results["fragments_detected"]:
            logging.info(f"[{originId}] Fragment: '{item['text']}' -> Key Text: '{item['key_text']}' with Similarity: {item['similarity']:.2%}")
    
    # Combined texts
    if results["combined"]:
        logging.info(f"[{originId}] Texts to be Combined: {len(results['combined'])}")
        for item in results["combined"]:
            logging.info(f"[{originId}] Combined Text: '{item['combined_text']}' -> Key Text: '{item['key_text']}' with Similarity: {item['similarity']:.2%}")
    
    # If no significant similarity found
    if not (results["similar_texts"] or results["fragments_detected"] or results["combined"]):
        logging.info(f"[{originId}] No significant similarity found.")
    
    # Statistics
    logging.info(f"[{originId}] Direct similarity: {results['statistics']['direct_similarity']}")
    logging.info(f"[{originId}] Fragments: {results['statistics']['fragments']}")
    logging.info(f"[{originId}] Combined: {results['statistics']['combined']}")

def load_image_url(source):
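    """Load an image from an HTTP(S) URL or a base64 string and return it as a grayscale OpenCV array."""
    # Disabling Pillow's decompression-bomb limit so very large images can be loaded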
    Image.MAX_IMAGE_PIXELS = None

    if source.startswith('http'):
        response = requests.get(source, timeout=30)
        img = np.asarray(bytearray(response.content), dtype=np.uint8)
        img = cv2.imdecode(img, cv2.IMREAD_GRAYSCALE)
    else:
        img = base64.b64decode(source)
        img = Image.open(BytesIO(img)).convert("RGB")  # normalize to RGB so the grayscale conversion below always works
        img = np.array(img)
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

    return img

def process_image(image, key_texts, similarity_threshold, fragment_threshold):
    """Processes the image, extracts text, and analyzes similarities."""
    try:
        # The early returns below match the arity of the successful return (7 values)
        if image is None:
            return "Please upload an image for analysis.", None, None, None, None, None, None

        if not key_texts.strip():
            return "Please enter key texts for comparison.", None, None, None, None, None, None
        
        # Extract text from the image using the user's method
        gpu_available = torch.cuda.is_available()
        extracted_texts = extract_text_from_image(image, gpu_available)
    
        if isinstance(key_texts, str):
            key_texts = [text.strip() for text in key_texts.split('\n') if text.strip()]
        
        # Process the analysis
        results = analyze_similarity(
            extracted_texts,
            key_texts,
            similarity_threshold=similarity_threshold/100,  # Convert percentage to decimal
            fragment_threshold=fragment_threshold/100  # Convert percentage to decimal
        )
        
        # Generate the HTML report
        html_report = generate_html_report(results)

        # Generate the DataFrames
        dfs = generate_results_dataframe(results)

        # Extract individual DataFrames (or create empty ones if they don't exist)
        df_statistics = dfs.get("statistics", pd.DataFrame())
        df_similar = dfs.get("similar", pd.DataFrame(columns=["Index", "Original Text", "Key Text", "Similarity"]))
        df_fragments = dfs.get("fragments", pd.DataFrame(columns=["Index", "Original Text", "Key Text", "Similarity"]))
        df_combined = dfs.get("combined", pd.DataFrame(columns=["Indices", "Text 1", "Text 2", "Combined Text", "Key Text", "Similarity"]))
    
        return html_report, df_statistics, df_similar, df_fragments, df_combined, extracted_texts, gpu_available

    except Exception as e:
        return f"Error while processing: {str(e)}", None, None, None, None, None, None
    
def process_manual_input(texts, key_texts, similarity_threshold, fragment_threshold):
    """Processes the user's manual text input."""
    # Validate input
    if not texts.strip() or not key_texts.strip():
        return "Please enter texts for analysis and key texts for comparison.", None, None, None, None
    
    try:
        # Split the newline-separated textbox input into lists, mirroring process_image
        # (analyze_similarity is assumed to expect lists of strings here, as elsewhere in this file)
        text_list = [text.strip() for text in texts.split('\n') if text.strip()]
        key_text_list = [text.strip() for text in key_texts.split('\n') if text.strip()]

        # Process the analysis
        results = analyze_similarity(
            text_list,
            key_text_list,
            similarity_threshold=similarity_threshold/100,  # Convert percentage to decimal
            fragment_threshold=fragment_threshold/100  # Convert percentage to decimal
        )
        
        # Generate HTML report
        html_report = generate_html_report(results)
        
        # Generate the DataFrames
        dfs = generate_results_dataframe(results)
        
        # Extract individual DataFrames (or create empty ones if they don't exist)
        df_statistics = dfs.get("statistics", pd.DataFrame())
        df_similar = dfs.get("similar", pd.DataFrame(columns=["Index", "Original Text", "Key Text", "Similarity"]))
        df_fragments = dfs.get("fragments", pd.DataFrame(columns=["Index", "Original Text", "Key Text", "Similarity"]))
        df_combined = dfs.get("combined", pd.DataFrame(columns=["Indices", "Text 1", "Text 2", "Combined Text", "Key Text", "Similarity"]))
        
        return html_report, df_statistics, df_similar, df_fragments, df_combined
    
    except Exception as e:
        return f"Erro ao processar: {str(e)}", None, None, None, None

def generate_html_report(results):
    """Generates an HTML report about the detected similarities."""
    html = "<h2>Similarity Report</h2>"
    
    # General statistics
    html += "<div padding: 15px; border-radius: 5px; margin-bottom: 20px;'>"
    html += f"<p><b>Total texts analyzed:</b> {results['statistics']['total_analyzed']}</p>"
    html += f"<p><b>Texts with detected similarity:</b> {results['statistics']['total_processed']}</p>"
    html += "</div>"
    
    # Results table
    html += "<h3>Detected Similarities</h3>"
    
    # Similar texts
    if results["similar_texts"]:
        html += "<h4>Direct Similar Texts</h4>"
        html += "<table width='100%' style='border-collapse: collapse; margin-bottom: 20px;'>"
        html += "<tr><th style='text-align: left; padding: 8px; border: 1px solid #ddd;'>Original Text</th><th style='text-align: left; padding: 8px; border: 1px solid #ddd;'>Key Text</th><th style='text-align: center; padding: 8px; border: 1px solid #ddd;'>Similarity</th></tr>"
        
        for item in results["similar_texts"]:
            html += f"<tr><td style='padding: 8px; border: 1px solid #ddd;'>{item['text']}</td><td style='padding: 8px; border: 1px solid #ddd;'>{item['key_text']}</td><td style='text-align: center; padding: 8px; border: 1px solid #ddd;'>{item['similarity']:.2%}</td></tr>"
        
        html += "</table>"
    
    # Detected fragments
    if results["fragments_detected"]:
        html += "<h4>Text with Detected Fragments</h4>"
        html += "<table width='100%' style='border-collapse: collapse; margin-bottom: 20px;'>"
        html += "<tr><th style='text-align: left; padding: 8px; border: 1px solid #ddd;'>Original Text</th><th style='text-align: left; padding: 8px; border: 1px solid #ddd;'>Key Text</th><th style='text-align: center; padding: 8px; border: 1px solid #ddd;'>Similarity</th></tr>"
        
        for item in results["fragments_detected"]:
            html += f"<tr><td style='padding: 8px; border: 1px solid #ddd;'>{item['text']}</td><td style='padding: 8px; border: 1px solid #ddd;'>{item['key_text']}</td><td style='text-align: center; padding: 8px; border: 1px solid #ddd;'>{item['similarity']:.2%}</td></tr>"
        
        html += "</table>"
    
    # Combined texts
    if results["combined"]:
        html += "<h4>Text that need to be combined</h4>"
        html += "<table width='100%' style='border-collapse: collapse; margin-bottom: 20px;'>"
        html += "<tr><th style='text-align: left; padding: 8px; border: 1px solid #ddd;'>Text 1</th><th style='text-align: left; padding: 8px; border: 1px solid #ddd;'>Text 2</th><th style='text-align: left; padding: 8px; border: 1px solid #ddd;'>Combination</th><th style='text-align: left; padding: 8px; border: 1px solid #ddd;'>Key Text</th><th style='text-align: center; padding: 8px; border: 1px solid #ddd;'>Similarity</th></tr>"
        
        for item in results["combined"]:
            html += f"<tr><td style='padding: 8px; border: 1px solid #ddd;'>{item['texts'][0]}</td><td style='padding: 8px; border: 1px solid #ddd;'>{item['texts'][1]}</td><td style='padding: 8px; border: 1px solid #ddd;'>{item['combined_text']}</td><td style='padding: 8px; border: 1px solid #ddd;'>{item['key_text']}</td><td style='text-align: center; padding: 8px; border: 1px solid #ddd;'>{item['similarity']:.2%}</td></tr>"
        
        html += "</table>"
    
    if not (results["similar_texts"] or results["fragments_detected"] or results["combined"]):
        html += "<p>No significant similarity found with the current parameters.</p>"
    
    return html

def generate_results_dataframe(results):
    """Generates pandas DataFrames to visualize the results."""
    dfs = {}
    
    # DataFrame for similar texts
    if results["similar_texts"]:
        data = [(item['index'], item['text'], item['key_text'], f"{item['similarity']:.2%}") 
                for item in results["similar_texts"]]
        dfs["similar"] = pd.DataFrame(data, columns=["Index", "Original Text", "Key Text", "Similarity"])
    
    # DataFrame for fragments
    if results["fragments_detected"]:
        data = [(item['index'], item['text'], item['key_text'], f"{item['similarity']:.2%}") 
                for item in results["fragments_detected"]]
        dfs["fragments"] = pd.DataFrame(data, columns=["Index", "Original Text", "Key Text", "Similarity"])
    
    # DataFrame for combined
    if results["combined"]:
        data = [(f"{item['indices'][0]},{item['indices'][1]}", 
                  item['texts'][0], 
                  item['texts'][1],
                  item['combined_text'],
                  item['key_text'], 
                  f"{item['similarity']:.2%}") 
                for item in results["combined"]]
        dfs["combined"] = pd.DataFrame(data, columns=["Indices", "Text 1", "Text 2", 
                                                        "Combined Text", "Key Text", "Similarity"])
    
    # Statistics DataFrame
    data = [
        ("Total analyzed", results["statistics"]["total_analyzed"]),
        ("Total with similarity", results["statistics"]["total_processed"]),
        ("Direct similarity", results["statistics"]["direct_similarity"]),
        ("Fragments", results["statistics"]["fragments"]),
        ("Combined", results["statistics"]["combined"])
    ]
    dfs["statistics"] = pd.DataFrame(data, columns=["Metric", "Value"])
    
    return dfs
    
#app = gr.mount_gradio_app(app, demo, path="/")

@app.get("/api")
def read_root():
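    """Return a simple message confirming the FastAPI app is reachable."""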
    return JSONResponse(content={"message": "Hello from FastAPI inside Gradio!"})

# if __name__ == "__main__":
#     import uvicorn
#     uvicorn.run(app)

# PORT = int(os.getenv("PORT", 7860))

# if __name__ == "__main__":
#     import uvicorn
#     print(f"A arrancar na porta {PORT}...")
#     uvicorn.run(app)

    #demo.launch(server_name="0.0.0.0", server_port=7860)