import pandas as pd
import gradio as gr
import torch
from PIL import Image
import logging

# Configure logging to write to a log file
logging.basicConfig(
    filename="output.log",
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S"
)

# Download example images for the demo
torch.hub.download_url_to_file('https://i.pinimg.com/originals/7f/5e/96/7f5e9657c08aae4bcd8bc8b0dcff720e.jpg', 'ejemplo1.jpg')
torch.hub.download_url_to_file('https://i.pinimg.com/originals/c2/ce/e0/c2cee05624d5477ffcf2d34ca77b47d1.jpg', 'ejemplo2.jpg')

# Model: custom-trained YOLOv5 weights, expected as best.pt in the working directory
model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt', force_reload=True, autoshape=True, trust_repo=True)

def to_json(results):
    '''Summarize the detections as a JSON-serializable dict plus the counts dataframe.'''
    detail = []

    results_df = to_dataframe(results)

    for index, row in results_df.iterrows():
        item = {
            "quantity": int(row['Cantidad']),  # plain int so the dict serializes cleanly
            "description": row['Especie']
        }
        detail.append(item)

    data = {
        "image": results.files[0],
        "size": f"{results.s[2]}x{results.s[3]}",  # results.s is the inference batch shape (B, C, H, W)
        "detail": detail
    }

    return data, results_df
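
# Illustrative to_json output for a single processed image (hypothetical
# values; actual counts depend on the inference run):
# data = {
#     "image": "ejemplo1.jpg",
#     "size": "640x640",
#     "detail": [
#         {"quantity": 2, "description": "Aedes"},
#         {"quantity": 0, "description": "Mosquito"},
#         {"quantity": 1, "description": "Mosca"},
#     ]
# }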

def to_dataframe(results):
    '''Count detections per species, including species with zero detections.'''
    # Map model class names to display names (identity for now, kept for future renames)
    labels_map = {
        'Aedes': "Aedes",
        'Mosquito': "Mosquito",
        'Mosca': "Mosca",
    }
    labels = list(labels_map.keys())

    # Count detections per class, then left-join against the full label list
    # so species with no detections still appear in the table
    columns_name = {'class': 'Cantidad', 'name': 'Especie'}
    results_df = (results.pandas().xyxy[0][['class', 'name']]
                  .groupby('name').count()
                  .reset_index()
                  .rename(columns=columns_name))
    results_df = pd.merge(pd.DataFrame(labels, columns=['Especie']), results_df, how='left', on='Especie').fillna(0)
    results_df['Cantidad'] = results_df['Cantidad'].astype(int)
    results_df['Especie'] = results_df['Especie'].map(labels_map)
    return results_df
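
# Illustrative to_dataframe result (hypothetical counts):
#     Especie  Cantidad
# 0     Aedes         2
# 1  Mosquito         0
# 2     Mosca         1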

def yolo(size, iou, conf, im):
    '''Wrapper fn for gradio'''
    try:
        g = (int(size) / max(im.size))  # gain
        im = im.resize(tuple(int(x * g) for x in im.size), Image.LANCZOS)  # resize with antialiasing

        model.iou = iou
        model.conf = conf

        results2 = model(im)  # inference
        results2.render()  # updates results2.ims with boxes and labels
        json_data, results_df = to_json(results2)
        logging.info(f"Image processed successfully: {json_data}")
        return Image.fromarray(results2.ims[0]), results_df, json_data
    except Exception as err:
        logging.error(f"Error during prediction: {err}")
        return None, None, None
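
# Quick local sanity check of the wrapper outside the Gradio UI (a sketch,
# assuming the example images above were downloaded successfully):
# img, df, data = yolo('640', 0.25, 0.50, Image.open('ejemplo1.jpg'))
# print(df)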

# ------------ Interface -------------
in1 = gr.Radio(['640', '1280'], value='640', label="Image size")
in2 = gr.Slider(minimum=0, maximum=1, step=0.05, value=0.25, label='NMS IoU threshold')
in3 = gr.Slider(minimum=0, maximum=1, step=0.05, value=0.50, label='Confidence threshold')
in4 = gr.Image(type='pil', label="Original Image")

out2 = gr.Image(type="pil", label="YOLOv5")
out3 = gr.Dataframe(label="Counts per species", headers=['Cantidad', 'Especie'], type="pandas")
out4 = gr.JSON(label="JSON")
# -------------- Text --------------
title = 'Trampas Barceló'
description = '<p><center>System developed by the Subsecretaría de Modernización of the Municipio de Vicente López. Warning: only use photos coming from the Barceló traps, not cell phone shots or images from the internet.<img src="https://www.vicentelopez.gov.ar/assets/images/logo-mvl.png" alt="logo" width="250"/></center></p>'

article = "<p style='text-align: center'><a href='https://docs.google.com/presentation/d/1T5CdcLSzgRe8cQpoi_sPB4U170551NGOrZNykcJD0xU/edit?usp=sharing' target='_blank'>For more info, click here to go to the white paper</a></p><p style='text-align: center'><a href='https://drive.google.com/drive/folders/1owACN3HGIMo4zm2GQ_jf-OhGNeBVRS7l?usp=sharing' target='_blank'>Google Colab Demo</a></p><p style='text-align: center'><a href='https://github.com/Municipalidad-de-Vicente-Lopez/Trampa_Barcelo' target='_blank'>Repo Github</a></p>"

examples = [['640', 0.25, 0.5, 'ejemplo1.jpg'], ['640', 0.25, 0.5, 'ejemplo2.jpg']]

iface = gr.Interface(yolo,
                     inputs=[in1, in2, in3, in4],
                     outputs=[out2, out3, out4],
                     title=title,
                     description=description,
                     article=article,
                     examples=examples,
                     #analytics_enabled=False,
                     #allow_flagging="manual",
                     #flagging_options=["Correcto", "Incorrecto", "Casi correcto", "Error", "Otro"],
                     #flagging_callback=hf_writer
                     )

#iface.queue(default_concurrency_limit=5)
iface.queue()  # enable request queuing
iface.launch(debug=True, server_port=7860, server_name='0.0.0.0')
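
# To serve the demo locally (assuming this file is saved as app.py with
# best.pt next to it): run `python app.py` and open http://localhost:7860.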