Spaces:
Sleeping
Sleeping
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import io

from fastapi import FastAPI, UploadFile, File
from PIL import Image
from pydantic import BaseModel
from transformers import AutoProcessor, CLIPModel
import torch
|
6 |
+
|
7 |
+
# Load the fashion-specialized CLIP checkpoint and its matching processor
# once at import time, so every request reuses the same in-memory model.
model = CLIPModel.from_pretrained("patrickjohncyh/fashion-clip")
processor = AutoProcessor.from_pretrained("patrickjohncyh/fashion-clip")
|
10 |
+
|
11 |
+
# Pydantic request schema for the /classify endpoint.
class ImageTextRequest(BaseModel):
    # Comma-separated candidate labels (the classifier splits on ',').
    text: str
    # Raw image bytes. NOTE(review): a `bytes` field in a JSON body must be
    # base64-encoded by the client — confirm against actual callers.
    image: bytes
|
15 |
+
|
16 |
+
# Create the FastAPI application instance.
app = FastAPI()
|
18 |
+
|
19 |
+
# Zero-shot classification of an image against comma-separated labels.
def classify_image_with_text(request: ImageTextRequest) -> str:
    """Return the candidate label from *request.text* that best matches the image.

    Args:
        request: carries ``text`` (comma-separated candidate labels) and
            ``image`` (raw image bytes).

    Returns:
        The highest-probability label string.

    Raises:
        PIL.UnidentifiedImageError: if ``request.image`` is not a decodable image.
    """
    # BUGFIX: CLIP scores the image against EACH label, so the comma-separated
    # prompt must be split into a list before preprocessing. Passing the whole
    # string produced a single (1, 1) logit, so argmax was always 0 and the
    # first label was returned unconditionally.
    labels = [label.strip() for label in request.text.split(',')]

    # BUGFIX: Image.open expects a path or file-like object, not raw bytes —
    # wrap the payload in BytesIO.
    image_pil = Image.open(io.BytesIO(request.image))

    # Preprocess text and image into model-ready tensors.
    inputs = processor(
        text=labels, images=image_pil, return_tensors="pt", padding=True
    )

    # Inference only: disable autograd to avoid building a graph.
    with torch.no_grad():
        outputs = model(**inputs)

    # logits_per_image has shape (1, num_labels); softmax over labels.
    probs = outputs.logits_per_image.softmax(dim=1)
    predicted_class_index = probs.argmax(dim=1).item()
    return labels[predicted_class_index]
|
36 |
+
|
37 |
+
# API route: POST /classify — run the classifier on the request payload.
@app.post("/classify")
async def classify(request: ImageTextRequest):
    """Classify the posted image against its comma-separated labels."""
    label = classify_image_with_text(request)
    return {"predicted_label": label}
|