# -*- coding: utf-8 -*-
"""Deploy OceanApp demo.ipynb

Automatically generated by Colaboratory.

Original file is located at
https://colab.research.google.com/drive/1j0T8gdLIa0X8fzkIgFpXDoU27BF49RUz?usp=sharing

# Model

YOLO is a family of compound-scaled object detection models trained on the COCO dataset. It includes simple functionality for Test Time Augmentation (TTA), model ensembling, hyperparameter evolution, and export to ONNX, CoreML and TFLite. A commented-out minimal inference sketch follows this header.

## Gradio Inference

This notebook can optionally be accelerated with a GPU runtime.

----------------------------------------------------------------------

YOLOv5 Gradio demo

*Author: Ultralytics LLC and Gradio*

# Code
"""
#!pip install -qr https://raw.githubusercontent.com/ultralytics/yolov5/master/requirements.txt gradio # install dependencies
import gradio as gr
import pandas as pd
import torch
import logging
import json
import re
from PIL import Image
# Images
torch.hub.download_url_to_file('https://i.pinimg.com/564x/18/0b/00/180b00e454362ff5caabe87d9a763a6f.jpg', 'ejemplo1.jpg')
torch.hub.download_url_to_file('https://i.pinimg.com/564x/3b/2f/d4/3b2fd4b6881b64429f208c5f32e5e4be.jpg', 'ejemplo2.jpg')
# Model
#model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # force_reload=True to update
#model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt')  # local model or Google Colab
model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt', force_reload=True, autoshape=True)  # local custom model (best.pt) or Google Colab
#model = torch.hub.load('path/to/yolov5', 'custom', path='/content/yolov56.pt', source='local')  # local repo
def removeStr(string):
    """Strip all spaces from a string."""
    return string.replace(" ", "")

def listJSON(a, b, c, d, e, f):
    """Build a JSON summary from fixed slices of the YOLOv5 results string."""
    x = re.findall("obo mar", d)
    y = re.findall("elica", d)
    z = re.findall("elica", f)
    if x:
        d = 'Lobo marino'
    if y:
        d = 'Pelicano'
    if z:
        f = 'Pelicano'
    if d == 'Lobo marino' or d == 'Pelicano':
        if d == 'Pelicano\nSp' or d == 'Pelicano\nS':
            d = 'Pelicano'
        if f != 'Pelicano':
            strlista = '"detail":[{"quantity":"' + str(removeStr(c)) + '","description":"' + str(d) + '"}]'
        else:
            strlista = '"detail":[{"quantity":"' + str(removeStr(c)) + '","description":"' + str(d) + '"},{"quantity":"' + str(removeStr(e)) + '","description":"' + str(f) + '"}]'
        strlist = '{"image":"' + str(removeStr(a)) + '","size":"' + str(removeStr(b)) + '",' + strlista + '}'
        json_string = json.loads(strlist)
        return json_string
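# Minimal alternative sketch (not used by the app): building the same payload as
# a dict avoids hand-assembling the JSON string and the json.loads round-trip.
# The function name and arguments mirror listJSON and are illustrative only.
#
# def listJSON_dict(a, b, c, d, e, f):
#     detail = [{"quantity": removeStr(c), "description": d}]
#     if f == 'Pelicano':
#         detail.append({"quantity": removeStr(e), "description": f})
#     return {"image": removeStr(a), "size": removeStr(b), "detail": detail}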
def arrayLista(a, b, c, d):
    """Build a (count, species) table from fixed slices of the YOLOv5 results string."""
    x = re.findall("obo mar", b)
    y = re.findall("elica", b)
    z = re.findall("elica", d)
    if x:
        b = 'Lobo marino'
    if y:
        b = 'Pelicano'
    if z:
        d = 'Pelicano'
    if b == 'Lobo marino' or b == 'Pelicano':
        strlist = []
        strlist2 = []
        strlist.append(removeStr(a))
        strlist.append(b)
        if d == 'Pelicano':
            strlist2.append(removeStr(c))
            strlist2.append(d)
        strlista = [strlist, strlist2]
        df = pd.DataFrame(strlista, columns=['Cantidad', 'Especie'])
        return df
def yolo(size, iou, conf, im):
    """Wrapper fn for gradio: resize the image, run inference, format the outputs."""
    try:
        g = int(size) / max(im.size)  # gain
        im = im.resize(tuple(int(x * g) for x in im.size), Image.LANCZOS)  # resize (LANCZOS is the equivalent of the removed ANTIALIAS alias)
        model.iou = iou
        model.conf = conf
        results2 = model(im)  # inference
        results2.render()  # updates results2.ims with boxes and labels
        results3 = str(results2)
        # The summary string is sliced at fixed positions to extract image name, size,
        # counts and class names; see the commented alternative below this function.
        lista = listJSON(results3[0:9], results3[11:18], results3[19:21], results3[22:32], results3[35:37], results3[37:45])
        lista2 = arrayLista(results3[19:21], results3[22:32], results3[35:37], results3[37:45])
        return Image.fromarray(results2.ims[0]), lista2, lista
    except Exception as e:
        logging.error(e, exc_info=True)
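# Illustrative alternative (not used above): recent ultralytics/yolov5 hub versions
# expose results.pandas().xyxy[0], a DataFrame with one row per detection and a
# 'name' column, which avoids slicing str(results) at fixed offsets. A minimal
# sketch, assuming that API is available in the loaded hub version:
#
# def detections_table(results):
#     det = results.pandas().xyxy[0]          # one row per detection
#     counts = det['name'].value_counts()     # detections per class name
#     return pd.DataFrame({'Cantidad': counts.values, 'Especie': counts.index})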
# ------------ Interface (legacy gr.inputs/gr.outputs API of older Gradio releases) -------------
in1 = gr.inputs.Radio(['640', '1280'], label="Image size", default='640', type='value')
in2 = gr.inputs.Slider(minimum=0, maximum=1, step=0.05, default=0.45, label='NMS IoU threshold')
in3 = gr.inputs.Slider(minimum=0, maximum=1, step=0.05, default=0.50, label='Confidence threshold')
in4 = gr.inputs.Image(type='pil', label="Original Image")
out2 = gr.outputs.Image(type="pil", label="Identification with YOLOv5")
out3 = gr.outputs.Dataframe(label="Description", headers=['Cantidad', 'Especie'])
out4 = gr.outputs.JSON(label="JSON")
# -------------- Text --------------
title = 'OceanApp'
description = """
<p>
<center>
<p>System for recognizing species in the bycatch of purse-seine fishing, using convolutional neural networks, for a company in the fishing sector at the ports of Callao and Paracas.</p>
<p><b>Note</b>: This model only accepts images of <b>sea lions (Lobo marino)</b> or <b>pelicans (Pelicano)</b> provided by Peruvian companies.</p>
<img src="https://i.pinimg.com/564x/3e/b8/f7/3eb8f7c348dffd7b3dffcafe81fbf2a6.jpg" alt="logo" width="250"/>
</center>
</p>
"""
article = "<p style='text-align: center'><a href='' target='_blank'>For more info, click to go to the white paper</a></p><p style='text-align: center'><a href='https://colab.research.google.com/drive/1j0T8gdLIa0X8fzkIgFpXDoU27BF49RUz?usp=sharing' target='_blank'>Google Colab Demo</a></p><p style='text-align: center'><a href='https://github.com/MssLune/OceanApp-Model' target='_blank'>GitHub Repo</a></p>"
examples = [['640',0.45, 0.75,'ejemplo1.jpg'], ['640',0.45, 0.75,'ejemplo2.jpg']]
iface = gr.Interface(yolo, inputs=[in1, in2, in3, in4], outputs=[out2, out3, out4], title=title,
                     description=description, article=article, examples=examples,
                     theme="huggingface", analytics_enabled=False)
iface.launch(debug=True)  # launch once; chaining a second .launch() on the return value would fail
"""For YOLOv5 PyTorch Hub inference with **PIL**, **OpenCV**, **Numpy** or **PyTorch** inputs please see the full [YOLOv5 PyTorch Hub Tutorial](https://github.com/ultralytics/yolov5/issues/36).
## Citation
[](https://zenodo.org/badge/latestdoi/264818686)
""" |