import torch
import logging
import gc
import numpy as np
import cv2
from PIL import Image
from typing import List, Dict, Optional, Tuple
from transformers import Owlv2Processor, Owlv2ForObjectDetection
from .base import BaseDetector
import time

logger = logging.getLogger(__name__)

class WeaponDetectorGPU(BaseDetector):
    """Detector de armas otimizado para GPU."""
    
    def __init__(self):
        """Inicializa o detector."""
        super().__init__()
        self.default_resolution = 640
        self.device = None  # Será configurado em _initialize
        self._initialize()
    
    def _initialize(self):
        try:
            # Require a CUDA device
            if not torch.cuda.is_available():
                raise RuntimeError("CUDA is not available!")
            
            # Pin everything to the first GPU
            self.device = torch.device("cuda:0")
            
            # Load model and processor
            logger.info("Loading model and processor...")
            model_name = "google/owlv2-base-patch16"
            
            self.owlv2_processor = Owlv2Processor.from_pretrained(model_name)
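            # Note: float16 weights halve GPU memory use, and device_map={"": 0}
            # places the entire model on GPU 0 at load time.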
            self.owlv2_model = Owlv2ForObjectDetection.from_pretrained(
                model_name,
                torch_dtype=torch.float16,
                device_map={"": 0}  # Mapear todo o modelo para GPU 0
            )
            
            # Put the model in inference mode
            self.owlv2_model.eval()
            
            # Load the detection text queries; they are tokenized once below
            # and reused for every image
            self.text_queries = self._get_detection_queries()
            logger.info(f"Loaded queries: {self.text_queries}")
            
            self.processed_text = self.owlv2_processor(
                text=self.text_queries,
                return_tensors="pt",
                padding=True
            ).to(self.device)
            
            logger.info("Inicialização GPU completa!")
            self._initialized = True
            
        except Exception as e:
            logger.error(f"Erro na inicialização GPU: {str(e)}")
            raise

    def detect_objects(self, image: Image.Image, threshold: float = 0.3) -> List[Dict]:
        """Detecta objetos em uma imagem."""
        try:
            # Pré-processar imagem
            image = self._preprocess_image(image)
            
            # Processar imagem
            image_inputs = self.owlv2_processor(
                images=image,
                return_tensors="pt"
            ).to(self.device)
            
            # Inference
            with torch.no_grad():
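                # Merge the cached text tokens with this image's pixel values
                # into a single forward pass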
                outputs = self.owlv2_model(**{**image_inputs, **self.processed_text})
                
                target_sizes = torch.tensor([image.size[::-1]], device=self.device)
                results = self.owlv2_processor.post_process_grounded_object_detection(
                    outputs=outputs,
                    target_sizes=target_sizes,
                    threshold=threshold
                )[0]
            
            # Collect detections above the threshold
            detections = []
            if len(results["scores"]) > 0:
                for score, box, label in zip(results["scores"], results["boxes"], results["labels"]):
                    score_val = score.item()
                    if score_val >= threshold:
                        label_idx = min(label.item(), len(self.text_queries) - 1)
                        detections.append({
                            "confidence": round(score_val * 100, 2),
                            "box": [int(x) for x in box.tolist()],
                            "label": self.text_queries[label_idx]
                        })
                        logger.debug(f"Detecção: {self.text_queries[label_idx]} ({score_val * 100:.2f}%)")
            
            # Aplicar NMS nas detecções
            return self._apply_nms(detections)
            
        except Exception as e:
            logger.error(f"Erro em detect_objects: {str(e)}")
            return []

    def _get_best_device(self) -> torch.device:
        """Retorna o melhor dispositivo disponível."""
        if torch.cuda.is_available():
            return torch.device("cuda:0")
        return torch.device("cpu")

    def _clear_gpu_memory(self):
        """Limpa memória GPU."""
        torch.cuda.empty_cache()
        gc.collect()

    def process_video(self, video_path: str, fps: Optional[int] = None, threshold: float = 0.3, resolution: int = 640) -> Tuple[str, Dict]:
        """Process a video for object detection."""
        metrics = {
            "total_time": 0,
            "frame_extraction_time": 0,
            "analysis_time": 0,
            "frames_analyzed": 0,
            "video_duration": 0,
            "device_type": "GPU",
            "detections": [],
            "technical": {
                "model": "owlv2-base-patch16",
                "input_size": f"{resolution}x{resolution}",
                "nms_threshold": 0.5,
                "preprocessing": "optimized",
                "early_stop": False
            }
        }
        
        try:
            start_time = time.time()
            
            # Clear GPU cache before starting
            self._clear_gpu_memory()
            
            # Extract frames
            t0 = time.time()
            frames = self.extract_frames(video_path, fps or 2, resolution)
            metrics["frame_extraction_time"] = time.time() - t0
            metrics["frames_analyzed"] = len(frames)
            
            if not frames:
                logger.warning("Nenhum frame extraído do vídeo")
                return video_path, metrics
            
            # Calcular duração do vídeo
            metrics["video_duration"] = len(frames) / (fps or 2)
            
            # Run detection on the frames
            t0 = time.time()
            detections_by_frame = self._process_frames(frames, fps, threshold)
            
            # Update final metrics
            metrics["analysis_time"] = time.time() - t0
            metrics["total_time"] = time.time() - start_time
            metrics["detections"] = detections_by_frame
            
            return video_path, metrics
            
        except Exception as e:
            logger.error(f"Erro ao processar vídeo: {str(e)}")
            return video_path, metrics

    def _process_frames(self, frames: List[np.ndarray], fps: Optional[int], threshold: float) -> List[Dict]:
        """Run detection over the extracted video frames."""
        detections_by_frame = []
        
        for i, frame in enumerate(frames):
            try:
                # Convert BGR (OpenCV) to RGB and preprocess
                frame_pil = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                frame_pil = self._preprocess_image(frame_pil)
                
                # Run the processor on the frame
                inputs = self.owlv2_processor(
                    images=frame_pil,
                    return_tensors="pt"
                ).to(self.device)
                
                # Inference
                with torch.no_grad():
                    outputs = self.owlv2_model(**{**inputs, **self.processed_text})
                    target_sizes = torch.tensor([frame_pil.size[::-1]], device=self.device)
                    results = self.owlv2_processor.post_process_grounded_object_detection(
                        outputs=outputs,
                        target_sizes=target_sizes,
                        threshold=threshold
                    )[0]
                    
                    # Collect detections above the threshold
                    if len(results["scores"]) > 0:
                        frame_detections = []
                        for score, box, label in zip(results["scores"], results["boxes"], results["labels"]):
                            score_val = score.item()
                            if score_val >= threshold:
                                label_idx = min(label.item(), len(self.text_queries) - 1)
                                frame_detections.append({
                                    "confidence": round(score_val * 100, 2),
                                    "box": [int(x) for x in box.tolist()],
                                    "label": self.text_queries[label_idx],
                                    "frame": i,
                                    "timestamp": i / (fps or 2)
                                })
                        
                        if frame_detections:
                            detections_by_frame.extend(self._apply_nms(frame_detections))
                
            except Exception as e:
                logger.error(f"Erro ao processar frame {i}: {str(e)}")
                continue
                
            finally:
                # Release per-frame tensors
                if 'inputs' in locals():
                    del inputs
                if 'outputs' in locals():
                    del outputs
                torch.cuda.empty_cache()
                
            # Progress log
            if i % 10 == 0:
                logger.info(f"Processed {i}/{len(frames)} frames")
        
        return detections_by_frame

    def _preprocess_image(self, image: Image.Image) -> Image.Image:
        """Pré-processa a imagem para o formato esperado pelo modelo."""
        try:
            # Cache de tamanho para evitar redimensionamentos desnecessários
            if hasattr(self, '_last_size') and self._last_size == image.size:
                return image

            # Converter para RGB se necessário usando conversão direta
            if image.mode != 'RGB':
                image = image.convert('RGB')

            # Resize while keeping the aspect ratio
            target_size = (self.default_resolution, self.default_resolution)
            if image.size != target_size:
                # Compute the scale factor once
                ratio = min(target_size[0] / image.size[0], target_size[1] / image.size[1])
                new_size = tuple(int(dim * ratio) for dim in image.size)
                
                # Resize straight to the final size when possible
                if new_size == target_size:
                    image = image.resize(target_size, Image.Resampling.BILINEAR)
                else:
                    # Letterbox: paste the resized image centered on a black canvas
                    new_image = Image.new('RGB', target_size, (0, 0, 0))
                    image = image.resize(new_size, Image.Resampling.BILINEAR)
                    paste_x = (target_size[0] - new_size[0]) // 2
                    paste_y = (target_size[1] - new_size[1]) // 2
                    new_image.paste(image, (paste_x, paste_y))
                    image = new_image

            # Remember the output size for the cache
            self._last_size = image.size
            return image
            
        except Exception as e:
            logger.error(f"Erro no pré-processamento: {str(e)}")
            return image

    def _apply_nms(self, detections: List[Dict], iou_threshold: float = 0.5) -> List[Dict]:
        """Aplica Non-Maximum Suppression nas detecções."""
        try:
            if not detections or len(detections) <= 1:
                return detections

            # Gather scores and boxes as tensors on the GPU
            scores = torch.tensor([d["confidence"] for d in detections], device=self.device)
            boxes = torch.tensor([[d["box"][0], d["box"][1], d["box"][2], d["box"][3]] 
                                for d in detections], device=self.device)

            # Sort by score, descending
            _, order = scores.sort(descending=True)
            keep = []

            while order.numel() > 0:
                if order.numel() == 1:
                    keep.append(order.item())
                    break

                i = order[0]
                keep.append(i.item())

                # Compute IoU of the top box against the remaining boxes
                box1 = boxes[i]
                box2 = boxes[order[1:]]
                
                # Intersection
                left = torch.max(box1[0], box2[:, 0])
                top = torch.max(box1[1], box2[:, 1])
                right = torch.min(box1[2], box2[:, 2])
                bottom = torch.min(box1[3], box2[:, 3])
                
                width = torch.clamp(right - left, min=0)
                height = torch.clamp(bottom - top, min=0)
                inter = width * height

                # Union
                area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
                area2 = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1])
                union = area1 + area2 - inter

                # IoU: keep only boxes that overlap at most iou_threshold
                iou = inter / union
                mask = iou <= iou_threshold
                order = order[1:][mask]

            # Return the surviving detections
            return [detections[i] for i in keep]

        except Exception as e:
            logger.error(f"Erro ao aplicar NMS: {str(e)}")
            return detections
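

# Usage sketch (assumes BaseDetector provides extract_frames() and
# _get_detection_queries(), that a CUDA GPU is available, and that
# "frame.jpg" / "clip.mp4" are placeholder paths):
#
#     from PIL import Image
#     detector = WeaponDetectorGPU()
#
#     detections = detector.detect_objects(Image.open("frame.jpg"), threshold=0.3)
#     for det in detections:
#         print(det["label"], det["confidence"], det["box"])
#
#     video_path, metrics = detector.process_video("clip.mp4", fps=2, threshold=0.3)
#     print(f'{metrics["frames_analyzed"]} frames, {len(metrics["detections"])} detections')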