Alex committed on
Commit
7364060
·
1 Parent(s): 779723a

added check

Browse files
Files changed (3) hide show
  1. README.md +10 -3
  2. app.py +33 -16
  3. requirements.txt +2 -1
README.md CHANGED
@@ -11,7 +11,7 @@ pinned: false
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
13
 
14
-
15
  curl -X POST "https://alexgenovese-segmentation.hf.space/segment" \
16
  -F "data=[{\"type\": \"image\", \"value\": null}]" \
17
  -F "data=@woman_with_bag.jpeg" \
@@ -19,15 +19,22 @@ curl -X POST "https://alexgenovese-segmentation.hf.space/segment" \
19
  -o response.json
20
 
21
 
 
22
  curl -X POST "https://alexgenovese-segmentation.hf.space/segment" \
23
  -H "accept: application/json" \
24
  -H "Content-Type: multipart/form-data" \
25
  -F "file=@woman_with_bag.jpeg"
26
 
27
 
 
 
 
 
 
 
28
 
29
- curl -X POST -F "file=@woman_with_bag.jpeg" https://alexgenovese-segmentation.hf.space/api/segment
30
 
31
- curl -X POST https://alexgenovese-segmentation.hf.space/segment \
32
  -H "Content-Type: application/json" \
33
  -d "{\"image_base64\": \"$(base64 woman_with_bag.jpeg)\"}"
 
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
13
 
14
+ # Sending image file to api
15
  curl -X POST "https://alexgenovese-segmentation.hf.space/segment" \
16
  -F "data=[{\"type\": \"image\", \"value\": null}]" \
17
  -F "data=@woman_with_bag.jpeg" \
 
19
  -o response.json
20
 
21
 
22
+ # Sending image file to api
23
  curl -X POST "https://alexgenovese-segmentation.hf.space/segment" \
24
  -H "accept: application/json" \
25
  -H "Content-Type: multipart/form-data" \
26
  -F "file=@woman_with_bag.jpeg"
27
 
28
 
29
+ # Sending image in base 64 to api
30
+ 1. base64 woman_with_bag.jpeg > image.b64
31
+
32
+ 2. curl -X POST "http://localhost:7860/segment" \
33
+ -H "Content-Type: application/json" \
34
+ -d "{\"image_base64\": \"$(cat image.b64)\"}"
35
 
36
+ ir
37
 
38
+ curl -X POST "http://localhost:7860/segment" \
39
  -H "Content-Type: application/json" \
40
  -d "{\"image_base64\": \"$(base64 woman_with_bag.jpeg)\"}"
app.py CHANGED
@@ -1,29 +1,43 @@
1
- from fastapi import FastAPI, File, UploadFile
2
  from transformers import SamModel, SamProcessor
3
  import torch
4
  from PIL import Image
5
  import numpy as np
6
  import io
7
  import base64
 
8
 
9
  # Inizializza l'app FastAPI
10
  app = FastAPI()
11
 
 
 
 
 
12
  # Carica il modello e il processore SAM
13
- model = SamModel.from_pretrained("facebook/sam-vit-base")
14
- processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
15
- model.to("cpu") # Usa CPU per il free tier
 
 
 
 
 
 
16
 
17
  # Funzione per segmentare l'immagine
18
  def segment_image(image: Image.Image):
19
  # Prepara l'input per SAM
 
20
  inputs = processor(image, return_tensors="pt").to("cpu")
21
 
22
  # Inferenza
 
23
  with torch.no_grad():
24
  outputs = model(**inputs, multimask_output=False)
25
 
26
  # Post-processa la maschera
 
27
  mask = processor.image_processor.post_process_masks(
28
  outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"]
29
  )[0][0].cpu().numpy()
@@ -44,18 +58,21 @@ def segment_image(image: Image.Image):
44
  # Endpoint API
45
  @app.post("/segment")
46
  async def segment_endpoint(file: UploadFile = File(...)):
47
- # Leggi l'immagine caricata
48
- image_data = await file.read()
49
- image = Image.open(io.BytesIO(image_data)).convert("RGB")
50
-
51
- # Segmenta l'immagine
52
- mask_base64, annotations = segment_image(image)
53
-
54
- # Restituisci la risposta
55
- return {
56
- "mask": f"data:image/png;base64,{mask_base64}",
57
- "annotations": annotations
58
- }
 
 
 
59
 
60
  # Per compatibilità con Hugging Face Spaces (Uvicorn viene gestito automaticamente)
61
  if __name__ == "__main__":
 
1
+ from fastapi import FastAPI, File, UploadFile, HTTPException
2
  from transformers import SamModel, SamProcessor
3
  import torch
4
  from PIL import Image
5
  import numpy as np
6
  import io
7
  import base64
8
+ import logging
9
 
10
  # Inizializza l'app FastAPI
11
  app = FastAPI()
12
 
13
+ # Configura il logging
14
+ logging.basicConfig(level=logging.INFO)
15
+ logger = logging.getLogger(__name__)
16
+
17
  # Carica il modello e il processore SAM
18
+ try:
19
+ logger.info("Caricamento del modello SAM...")
20
+ model = SamModel.from_pretrained("facebook/sam-vit-base")
21
+ processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
22
+ model.to("cpu") # Usa CPU per il free tier
23
+ logger.info("Modello caricato con successo.")
24
+ except Exception as e:
25
+ logger.error(f"Errore nel caricamento del modello: {str(e)}")
26
+ raise RuntimeError(f"Errore nel caricamento del modello: {str(e)}")
27
 
28
  # Funzione per segmentare l'immagine
29
  def segment_image(image: Image.Image):
30
  # Prepara l'input per SAM
31
+ logger.info("Preparazione dell'immagine per l'inferenza...")
32
  inputs = processor(image, return_tensors="pt").to("cpu")
33
 
34
  # Inferenza
35
+ logger.info("Esecuzione dell'inferenza...")
36
  with torch.no_grad():
37
  outputs = model(**inputs, multimask_output=False)
38
 
39
  # Post-processa la maschera
40
+ logger.info("Post-processing della maschera...")
41
  mask = processor.image_processor.post_process_masks(
42
  outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"]
43
  )[0][0].cpu().numpy()
 
58
  # Endpoint API
59
  @app.post("/segment")
60
  async def segment_endpoint(file: UploadFile = File(...)):
61
+ try:
62
+ logger.info("Ricezione del file...")
63
+ image_data = await file.read()
64
+ image = Image.open(io.BytesIO(image_data)).convert("RGB")
65
+
66
+ logger.info("Segmentazione dell'immagine...")
67
+ mask_base64, annotations = segment_image(image)
68
+
69
+ return {
70
+ "mask": f"data:image/png;base64,{mask_base64}",
71
+ "annotations": annotations
72
+ }
73
+ except Exception as e:
74
+ logger.error(f"Errore nell'endpoint: {str(e)}")
75
+ raise HTTPException(status_code=500, detail=f"Errore nell'elaborazione: {str(e)}")
76
 
77
  # Per compatibilità con Hugging Face Spaces (Uvicorn viene gestito automaticamente)
78
  if __name__ == "__main__":
requirements.txt CHANGED
@@ -1,6 +1,7 @@
 
 
1
  torch
2
  torchvision
3
  transformers
4
- gradio
5
  pillow
6
  numpy
 
1
+ fastapi
2
+ uvicorn
3
  torch
4
  torchvision
5
  transformers
 
6
  pillow
7
  numpy