Update app.py
app.py CHANGED
@@ -1,21 +1,22 @@
 import gradio as gr
 from PIL import Image, ImageOps
-import torch
 from transformers import CLIPProcessor, CLIPModel
+import torch
+

-# ─── 1) Carrega modelo e processor CLIP fine-tuned ───
 MODEL_ID = "EduFalcao/CropVision-CLIP"
+
+
 processor = CLIPProcessor.from_pretrained(MODEL_ID)
 model = CLIPModel.from_pretrained(MODEL_ID)

-
+
 HF_LABELS = [
     "Grape leaf with Black rot",
     "Grape leaf with Esca (Black Measles)",
     "Grape leaf with Leaf blight (Isariopsis Leaf Spot)",
     "Healthy Grape leaf"
 ]
-# Mapeamento para as tuas classes curtas
 MAP = {
     "Grape leaf with Black rot": "Black Rot",
     "Grape leaf with Esca (Black Measles)": "ESCA",
@@ -23,35 +24,34 @@ MAP = {
     "Healthy Grape leaf": "Healthy"
 }

-def predict(
-
-    img = ImageOps.exif_transpose(
+def predict(image: Image.Image):
+
+    img = ImageOps.exif_transpose(image).convert("RGB")
     img = img.resize((224,224))

-
+
     inputs = processor(text=HF_LABELS, images=img, return_tensors="pt", padding=True)
     outputs = model(**inputs)
     probs = outputs.logits_per_image.softmax(dim=1)[0].tolist()

-    # Constrói dicionário label→prob
-    mapping = { MAP[HF_LABELS[i]]: probs[i] for i in range(len(probs)) }
-    # Escolhe a classe de maior probabilidade
-    best = max(mapping, key=mapping.get)

-
-
+    mapping = { MAP[HF_LABELS[i]]: probs[i] for i in range(len(probs)) }
+    best = max(mapping, key=mapping.get)
+    prob_lines = "\n".join(f"{cls}: {mapping[cls]:.2f}"
+                           for cls in ["Healthy","Leaf Blight","Black Rot","ESCA"])
     return best, prob_lines

-
+
 demo = gr.Interface(
     fn=predict,
     inputs=gr.Image(type="pil", label="Carrega uma folha"),
     outputs=[
-
-
+        gr.Textbox(label="Classe prevista"),
+        gr.Textbox(label="Probabilidades entre Classes")
     ],
-    title="CropVision
-    description="Healthy
+    title="CropVision-CLIP",
+    description="Neste modelo vamos classificar folhas de vinhas em Healthy, Leaf Blight, Black Rot ou ESCA"
 )

 if __name__ == "__main__":
+    demo.launch()
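For quick local testing, the zero-shot scoring that the updated predict() performs can be exercised without the Gradio UI. A minimal sketch, assuming the EduFalcao/CropVision-CLIP checkpoint downloads normally; "leaf.jpg" is a hypothetical local image path, not a file in the Space.

# Sketch: same zero-shot scoring as predict(), run outside Gradio.
# "leaf.jpg" is a placeholder path (assumption, not part of the Space).
import torch
from PIL import Image, ImageOps
from transformers import CLIPProcessor, CLIPModel

MODEL_ID = "EduFalcao/CropVision-CLIP"
processor = CLIPProcessor.from_pretrained(MODEL_ID)
model = CLIPModel.from_pretrained(MODEL_ID)

HF_LABELS = [
    "Grape leaf with Black rot",
    "Grape leaf with Esca (Black Measles)",
    "Grape leaf with Leaf blight (Isariopsis Leaf Spot)",
    "Healthy Grape leaf",
]

img = ImageOps.exif_transpose(Image.open("leaf.jpg")).convert("RGB").resize((224, 224))
inputs = processor(text=HF_LABELS, images=img, return_tensors="pt", padding=True)
with torch.no_grad():
    probs = model(**inputs).logits_per_image.softmax(dim=1)[0].tolist()

# Print the prompts from most to least likely.
for label, p in sorted(zip(HF_LABELS, probs), key=lambda t: -t[1]):
    print(f"{label}: {p:.2f}")

The winning prompt can then be shortened to the class name shown in the UI via the same MAP dictionary used in app.py.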