import io

from fastapi import FastAPI, UploadFile, File
from transformers import pipeline
from fastai.vision.all import *
from PIL import Image

# NOTE - we configure docs_url to serve the interactive docs at the root path
# of the app. This way, we can use the docs as a landing page for the app on Spaces.
app = FastAPI(docs_url="/")

# Text-generation pipeline backed by google/flan-t5-small
pipe = pipeline("text2text-generation", model="google/flan-t5-small")

# Face-shape classifier exported with fastai, plus its category labels
categories = ('Heart', 'Oblong', 'Oval', 'Round', 'Square')
learn = load_learner('model.pkl')


@app.get("/generate")
def generate(text: str):
    """
    Generate text from the given input using the `transformers`
    text2text-generation pipeline. The model used is `google/flan-t5-small`,
    which can be found [here](https://huggingface.co/google/flan-t5-small).
    """
    output = pipe(text)
    return {"output": output[0]["generated_text"]}


@app.post("/face-analyse")
async def face_analyse(file: UploadFile = File(...)):
    # Read the uploaded file content
    request_object_content = await file.read()

    try:
        # Attempt to open the uploaded bytes as an image
        img = Image.open(io.BytesIO(request_object_content))
    except Exception:
        return {"error": "Failed to open the image file. Make sure it is a valid image file."}

    try:
        # Resize the image to 300x300 pixels before inference
        img = img.resize((300, 300))
    except Exception:
        return {"error": "Failed to resize the image."}

    try:
        # Run the fastai classifier on the image
        pred, idx, probs = learn.predict(img)
    except Exception:
        return {"error": "Failed to make predictions."}

    # Map each category label to its predicted probability
    return dict(zip(categories, map(float, probs)))
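
# Example usage (a sketch, not part of the app): once the server is running,
# e.g. with `uvicorn app:app --host 0.0.0.0 --port 7860` (the port and the
# `model.pkl` path next to this file are assumptions based on a typical Spaces
# setup), the two endpoints can be exercised with curl:
#
#   curl "http://localhost:7860/generate?text=Translate%20to%20German:%20hello"
#   curl -X POST -F "file=@face.jpg" "http://localhost:7860/face-analyse"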