#===========================================================================#
#===========================================================================#
# SETUP INSTALLATIONS
#===========================================================================#
#===========================================================================#
import os
import sys

def install_packages():
    # Upgrade pip first
    os.system("pip install --upgrade pip")
    # Install the required packages
    packages = [
        "opencv-python-headless==4.10.0.82",
        "ultralytics==8.3",
        "telethon==1.37.0",
        "cryptography==43.0.3",
        "nest_asyncio",
        "torch==2.5.0 torchvision==0.20.0 torchaudio==2.5.0 --index-url https://download.pytorch.org/whl/cpu",
        "paddlepaddle==2.6.2 -f https://paddlepaddle.org.cn/whl/mkl/avx/stable.html",
        "paddleocr==2.9.1",
        "prettytable==3.12",
        "gradio==5.6",
    ]
    for package in packages:
        print(f"Installing {package}...")
        os.system(f"pip install {package}")
    print("All packages installed successfully.")

install_packages()
#===========================================================================#
#===========================================================================#
# PLAY THE CLASS
#===========================================================================#
#===========================================================================#
import gradio as gr
import numpy as np
import cv2
from collections import deque, OrderedDict, defaultdict
from ultralytics import YOLO
from paddleocr import PaddleOCR
import asyncio
import threading
from telethon import TelegramClient
from cryptography.fernet import Fernet
import json
import nest_asyncio
from prettytable import PrettyTable
import time

# Apply nest_asyncio to allow nested event loops
nest_asyncio.apply()

# Resolve resource paths relative to the current working directory
def resource_path(relative_path):
    return os.path.join(os.getcwd(), relative_path)
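
# The files below are expected in the working directory (resolved via resource_path);
# this list is inferred from how they are loaded later in this script:
#   best.pt            -> YOLO license-plate detection weights
#   paddleocr_models/  -> en_PP-OCRv3_det_infer, en_PP-OCRv3_rec_infer,
#                         ch_ppocr_mobile_v2.0_cls_infer
#   SECRET_DATA.enc    -> Fernet-encrypted JSON with the Telegram credentials
#   decrypt_key.txt    -> Fernet key used to decrypt SECRET_DATA.enc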
# Class adapted to process individual webcam frames
class LicensePlateProcessor:
    def __init__(self):
        self.last_frame_time = None  # time of the last processed frame
        self.fps = 0  # FPS starts at zero
        # Load the YOLO detection model
        model_path = resource_path('best.pt')
        self.model = YOLO(model_path, task='detect')
        # Load PaddleOCR
        paddleocr_model_dir = resource_path('paddleocr_models')
        self.ocr = PaddleOCR(
            use_angle_cls=True,
            use_gpu=False,
            lang='en',
            det_algorithm='DB',
            rec_algorithm='CRNN',
            show_log=False,
            rec_model_dir=os.path.join(paddleocr_model_dir, 'en_PP-OCRv3_rec_infer'),
            det_model_dir=os.path.join(paddleocr_model_dir, 'en_PP-OCRv3_det_infer'),
            cls_model_dir=os.path.join(paddleocr_model_dir, 'ch_ppocr_mobile_v2.0_cls_infer')
        )
        # Load the encrypted credentials
        self.load_encrypted_data()
        # Initialize the Telegram client
        self.telegram_client = TelegramClient(self.session_name, self.api_id, self.api_hash)
        self.telegram_client.start()
        # Plate memory
        self.plates_memory = deque(maxlen=500)
        self.last_sixteen_plates = OrderedDict()
        # Plates waiting for a response from the bot
        self.waiting_plates = {}
        # Run the asyncio loop in a separate thread
        self.loop = asyncio.get_event_loop()
        self.loop_thread = threading.Thread(target=self.start_loop, daemon=True)
        self.loop_thread.start()
        # Start the async task that polls for responses
        asyncio.run_coroutine_threadsafe(self.check_responses(), self.loop)

    def start_loop(self):
        """Runs the asyncio event loop."""
        asyncio.set_event_loop(self.loop)
        self.loop.run_forever()
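
    # Note on threading: check_responses() runs on this background loop in a daemon thread,
    # while has_seven() is invoked from the Gradio request path (via process_frame); the two
    # sides are bridged with asyncio.run_coroutine_threadsafe.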
    def load_encrypted_data(self):
        """Loads and decrypts the sensitive configuration."""
        encrypted_data_path = resource_path('SECRET_DATA.enc')
        decrypt_key_path = resource_path('decrypt_key.txt')
        with open(encrypted_data_path, "rb") as f:
            data_encrypted = f.read()
        with open(decrypt_key_path, "r") as key_file:
            key_str = key_file.read().strip()
        key = key_str.encode('utf-8')
        cipher = Fernet(key)
        data_decrypted = cipher.decrypt(data_encrypted)
        config = json.loads(data_decrypted.decode())
        self.api_id = config["api_id"]
        self.api_hash = config["api_hash"]
        self.phone_number = config["phone_number"]
        self.session_name = 'orlandini_hf.session'  # resource_path(config["session_name"])
    def has_seven(self, plate_text):
        """Returns the status of a plate, querying the Telegram bot if necessary."""
        # Check whether the plate is already in memory
        for item in self.plates_memory:
            if item['plate'] == plate_text:
                return item['has_seven']
        # Check whether it is already waiting for a response
        if plate_text in self.waiting_plates:
            return self.waiting_plates[plate_text]
        else:
            # Send the plate to the Telegram bot
            self.waiting_plates[plate_text] = 'Waiting'
            asyncio.run_coroutine_threadsafe(self.send_plate(plate_text), self.loop)
            return 'Waiting'
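
    # Plate status values flowing through has_seven / check_responses:
    #   'Waiting'   -> sent to the bot, no reply yet
    #   False       -> bot replied 'Placa Localizada'
    #   True        -> bot replied 'não foi encontrada'
    #   'Non Valid' -> bot replied 'não é uma placa válida'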
    async def send_plate(self, plate_text):
        """Sends the plate to the Telegram bot."""
        chat_identifier = '@LT_BUSCABOT'
        try:
            await self.telegram_client.connect()
            await self.telegram_client.send_message(chat_identifier, plate_text)
            print(f"Enviado para Telegram: {plate_text}")
        except Exception as e:
            print(f"Erro ao enviar placa {plate_text}: {e}")
    async def check_responses(self):
        """Polls the Telegram bot for responses to pending plates."""
        while True:
            if not self.waiting_plates:
                await asyncio.sleep(1)
                continue
            chat_identifier = '@LT_BUSCABOT'
            limit = 20
            try:
                await self.telegram_client.connect()
                messages = await self.telegram_client.get_messages(chat_identifier, limit=limit)
                # print(messages)
                for message in messages:
                    text = message.text or ''  # guard against messages with no text (e.g. media)
                    # Check whether this message answers one of the pending plates
                    for plate in list(self.waiting_plates.keys()):
                        if plate.lower() in text.lower():
                            # Found a response for this plate
                            if 'Placa Localizada' in text:
                                self.waiting_plates.pop(plate)
                                self.plates_memory.append({'plate': plate, 'has_seven': False})
                            elif 'não foi encontrada' in text:
                                self.waiting_plates.pop(plate)
                                self.plates_memory.append({'plate': plate, 'has_seven': True})
                            elif 'não é uma placa válida' in text:
                                self.waiting_plates.pop(plate)
                                self.plates_memory.append({'plate': plate, 'has_seven': 'Non Valid'})
                            # Update the plate status in the displayed grid
                            self.update_displayed_plate(plate)
            except Exception as e:
                print(f"Error checking responses: {e}")
            await asyncio.sleep(2)
    def update_displayed_plate(self, plate):
        """Updates the status of a plate shown in the table."""
        for item in self.plates_memory:
            if item['plate'] == plate:
                if item['has_seven'] == 'Non Valid':
                    self.last_sixteen_plates.pop(plate, None)
                else:
                    self.last_sixteen_plates[plate] = item['has_seven']
                break
        # Keep only the last 16 valid plates
        self.last_sixteen_plates = OrderedDict(
            (p, s) for p, s in self.last_sixteen_plates.items() if s != 'Non Valid'
        )
        while len(self.last_sixteen_plates) > 16:
            self.last_sixteen_plates.popitem(last=False)

    def remove_non_alphanumeric(self, text):
        """Removes non-alphanumeric characters."""
        return ''.join(char for char in text if char.isalnum())

    def has_number_and_letter(self, text):
        """Checks that the text contains both letters and digits."""
        return text.isalnum() and not text.isalpha() and not text.isdigit()
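
    # Quick reference for the two helpers above: remove_non_alphanumeric('ABC-1234') gives
    # 'ABC1234', and has_number_and_letter('ABC1234') is True, while purely alphabetic
    # ('ABCDEFG') or purely numeric ('1234567') strings return False.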
    def process_license_plates(self, ocr_result):
        """Processes the OCR result to assemble plate candidates."""
        def is_overlapping(box1, box2):
            x1_min = min(point[0] for point in box1)
            x1_max = max(point[0] for point in box1)
            y1_min = min(point[1] for point in box1)
            y1_max = max(point[1] for point in box1)
            x2_min = min(point[0] for point in box2)
            x2_max = max(point[0] for point in box2)
            y2_min = min(point[1] for point in box2)
            y2_max = max(point[1] for point in box2)
            # Overlap (with a small margin) along X
            overlap_x = (x1_max + 2 >= x2_min - 2) and (x1_min - 2 <= x2_max + 2)
            # Overlap (with a small margin) along Y
            overlap_y = (y1_max + 2 >= y2_min - 2) and (y1_min - 2 <= y2_max + 2)
            return overlap_x and overlap_y

        # Extract boxes and recognized strings
        boxes = [item[0] for item in ocr_result[0]]
        strings = [item[1][0] for item in ocr_result[0]]
        # Split into full plates and partial segments
        full_plates = []
        partial_segments = []
        for box, s in zip(boxes, strings):
            if len(s) >= 7:
                full_plates.append((box, s))
            elif len(s) in [3, 4]:
                partial_segments.append((box, s))
        # Group partial segments with union-find
        n = len(partial_segments)
        parent = list(range(n))

        def find(i):
            while parent[i] != i:
                parent[i] = parent[parent[i]]
                i = parent[i]
            return i

        def union(i, j):
            pi = find(i)
            pj = find(j)
            if pi != pj:
                parent[pj] = pi

        # Build the groups based on box overlap
        for i in range(n):
            for j in range(i + 1, n):
                if is_overlapping(partial_segments[i][0], partial_segments[j][0]):
                    union(i, j)
        # Collect the segments of each group
        groups = defaultdict(list)
        for i in range(n):
            groups[find(i)].append(partial_segments[i])
        # Concatenate the texts within each group
        concatenated_partials = []
        for group in groups.values():
            # Sort by minimum Y coordinate (top to bottom)
            sorted_group = sorted(group, key=lambda x: min(point[1] for point in x[0]))
            concatenated = ''.join([s for box, s in sorted_group])
            concatenated_partials.append(concatenated)
        # Add the full plates
        all_plates = [s for box, s in full_plates] + concatenated_partials
        return all_plates
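
    # Illustrative example of the grouping above (made-up values, not from a real run): two
    # short OCR segments ('ABC', top box) and ('1234', bottom box) with overlapping boxes are
    # merged by union-find into one group, sorted top-to-bottom, and joined into the single
    # candidate 'ABC1234'; strings of 7+ characters bypass the grouping and are kept as-is.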
    def perform_ocr(self, img_array):
        """Runs OCR on the image and returns the detected texts."""
        if img_array.shape[0] == 0 or img_array.shape[1] == 0:
            return None
        result = self.ocr.ocr(img_array, cls=True)
        if not result[0]:
            return None
        return self.process_license_plates(result)

    def save_plate(self, plate_text):
        """Stores and returns the status of a plate."""
        # Check whether the plate is already in memory
        for item in self.plates_memory:
            if item['plate'] == plate_text:
                return item['has_seven']
        # Get the status from has_seven
        has_seven = self.has_seven(plate_text)
        if has_seven != 'Waiting':
            self.plates_memory.append({'plate': plate_text, 'has_seven': has_seven})
        return has_seven
    def process_frame(self, frame):
        """Processes a single webcam frame."""
        current_time = time.time()
        # Compute the time delta and the resulting FPS
        if self.last_frame_time:
            time_diff = current_time - self.last_frame_time
            if time_diff > 0:  # avoid division by zero
                self.fps = 1 / time_diff
        # Update the timestamp of the last frame
        self.last_frame_time = current_time
        print(f"FPS: {self.fps:.2f}")
        img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # swap channels (Gradio delivers RGB frames)
        # Detect plates with YOLO
        results = self.model.predict(img, imgsz=256, conf=0.5)
        plates_list = []
        for res in results[0].boxes.data:
            x1, y1, x2, y2 = int(res[0]), int(res[1]), int(res[2]), int(res[3])
            # Pad the crop around the detection
            if (y2 - y1) > 0 and (x2 - x1) > 0:
                prod = img.shape[0] * img.shape[1]
                adj = round(5 * prod / 315000)  # padding grows with image area (~5 px at 640x480)
                y1_adj = max(y1 - adj, 0)
                y2_adj = min(y2 + adj, img.shape[0])
                x1_adj = max(x1 - adj, 0)
                x2_adj = min(x2 + adj, img.shape[1])
                crop = img[y1_adj:y2_adj, x1_adj:x2_adj]
                # Upscale the crop before OCR
                scale_factor = 3
                new_size = (int(crop.shape[1] * scale_factor), int(crop.shape[0] * scale_factor))
                resized_crop = cv2.resize(crop, new_size, interpolation=cv2.INTER_LINEAR)
                # Run OCR
                text_list = self.perform_ocr(resized_crop)
                if text_list is None:
                    continue
                for text in text_list:
                    text = self.remove_non_alphanumeric(text)
                    if text and self.has_number_and_letter(text):
                        has_seven = self.save_plate(text)
                        plates_list.append((text, has_seven))
                        if text not in self.last_sixteen_plates and has_seven != 'Non Valid':
                            if len(self.last_sixteen_plates) >= 16:
                                self.last_sixteen_plates.popitem(last=False)
                            self.last_sixteen_plates[text] = has_seven
        # Return the updated display table
        return self.get_display_table()
    def get_display_table(self):
        """Returns the table of the last 16 detected plates."""
        if not self.last_sixteen_plates:
            return "Nenhuma placa detectada ainda."
        else:
            table = PrettyTable()
            table.field_names = ["Placa", "Status"]
            for plate, status in self.last_sixteen_plates.items():
                if status != 'Waiting' and status != 'Non Valid':
                    status_text = 'Okay' if status is True else '!EITA!'
                    table.add_row([plate, status_text])
            return table.get_string()
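
# Minimal offline usage sketch (assumes the model assets listed near the top of this file;
# 'car.jpg' is a hypothetical file name used only for illustration):
#
#   processor = LicensePlateProcessor()
#   frame = cv2.imread('car.jpg')          # HxWx3 uint8 array, like the frames Gradio streams
#   print(processor.process_frame(frame))  # prints the table of recently detected plates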
#===========================================================================#
#===========================================================================#
# PLAY GRADIO
#===========================================================================#
#===========================================================================#
def js_to_prefer_the_back_camera_of_mobilephones():
    custom_html = """
    <script>
    const originalGetUserMedia = navigator.mediaDevices.getUserMedia.bind(navigator.mediaDevices);
    navigator.mediaDevices.getUserMedia = (constraints) => {
        if (!constraints.video.facingMode) {
            constraints.video.facingMode = {ideal: "environment"};
        }
        return originalGetUserMedia(constraints);
    };
    </script>
    """
    return custom_html

# Instantiate the processor
processor = LicensePlateProcessor()
# Function called by Gradio for each streamed frame; also receives the resize factor from the slider
def process_webcam_frame(frame, resize_factor):
    if frame is None:
        return "Frame não detectado"
    # Resize the frame by the given factor
    original_size = frame.shape[:2]  # original height and width
    new_size = (
        int(original_size[1] * resize_factor),
        int(original_size[0] * resize_factor),
    )
    resized_frame = cv2.resize(frame, new_size, interpolation=cv2.INTER_LINEAR)
    # Log the frame dimensions
    print(f"Original frame size: {original_size}, Resized frame size: {new_size}")
    # Process the resized frame
    return processor.process_frame(resized_frame)
with gr.Blocks(head=js_to_prefer_the_back_camera_of_mobilephones()) as demo:
    with gr.Row():
        with gr.Column():
            # Webcam input
            input_img = gr.Image(
                label="Webcam",
                sources="webcam",
                streaming=True,
                mirror_webcam=False
            )
            # Slider that controls the resize factor applied to each frame
            resize_factor_input = gr.Slider(
                minimum=0.1, maximum=1.1, step=0.01, value=1.0, label="Fator de Redimensionamento"
            )
        with gr.Column():
            # Text box for the output table
            output_text = gr.Textbox(
                label="Últimas 16 Placas Detectadas",
                lines=20
            )
    # Stream each webcam frame (plus the slider value) into the processor
    input_img.stream(
        process_webcam_frame,
        inputs=[input_img, resize_factor_input],
        outputs=output_text,
        time_limit=None,
        stream_every=0.2,
        concurrency_limit=None
    )

demo.launch()
# demo.launch(debug=True, share=True)