from fastapi import FastAPI, File, UploadFile
from transformers import SamModel, SamProcessor
import torch
from PIL import Image
import numpy as np
import io
import base64

# Initialize the FastAPI app
app = FastAPI()

# Load the SAM model and processor
model = SamModel.from_pretrained("facebook/sam-vit-base")
processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
model.to("cpu")  # Use the CPU (the Spaces free tier has no GPU)
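
# Optional variation (a sketch, not part of the original setup): if a GPU were
# available, the device could be picked dynamically instead, e.g.
#   device = "cuda" if torch.cuda.is_available() else "cpu"
#   model.to(device)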

# Function that segments the image
def segment_image(image: Image.Image):
    # Prepare the input for SAM
    inputs = processor(image, return_tensors="pt").to("cpu")
    
    # Run inference
    with torch.no_grad():
        outputs = model(**inputs, multimask_output=False)
    
    # Post-process the mask; [0][0][0] selects the first image, the first
    # prompt batch, and the single predicted mask (multimask_output=False),
    # leaving a 2-D (H, W) boolean array
    mask = processor.image_processor.post_process_masks(
        outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"]
    )[0][0][0].cpu().numpy()
    
    # Convert the boolean mask to an 8-bit grayscale image
    mask_img = Image.fromarray((mask * 255).astype(np.uint8))
    
    # Encode the mask as base64 for the response
    buffered = io.BytesIO()
    mask_img.save(buffered, format="PNG")
    mask_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
    
    # Annotations (note: serializing the full mask as a nested list can make the JSON response large)
    annotations = {"mask": mask.tolist(), "label": "object"}
    
    return mask_base64, annotations
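
# Optional variation (a sketch, not used above): SAM is normally prompt-driven,
# so segment_image could accept point prompts and pass them to the processor,
# e.g. with a single illustrative (x, y) point:
#   input_points = [[[450, 600]]]  # one point for the first (and only) image
#   inputs = processor(image, input_points=input_points, return_tensors="pt").to("cpu")
# The inference and post-processing steps would stay the same.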

# API endpoint
@app.post("/segment")
async def segment_endpoint(file: UploadFile = File(...)):
    # Read the uploaded image
    image_data = await file.read()
    image = Image.open(io.BytesIO(image_data)).convert("RGB")
    
    # Segment the image
    mask_base64, annotations = segment_image(image)
    
    # Return the response
    return {
        "mask": f"data:image/png;base64,{mask_base64}",
        "annotations": annotations
    }

# For compatibility with Hugging Face Spaces (Uvicorn is managed automatically)
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)
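
# Example client (a minimal sketch, assuming the server is reachable on
# localhost:7860 and an image "test.jpg" exists; both names are illustrative,
# not part of this app). Run it from a separate script:
#
#   import base64, io, requests
#   from PIL import Image
#
#   with open("test.jpg", "rb") as f:
#       resp = requests.post("http://localhost:7860/segment", files={"file": f})
#   data = resp.json()
#   # Strip the "data:image/png;base64," prefix and decode the PNG mask
#   mask = Image.open(io.BytesIO(base64.b64decode(data["mask"].split(",", 1)[1])))
#   mask.show()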