# https://medium.com/@qacheampong/building-and-deploying-a-fastapi-app-with-hugging-face-9210e9b4a713
# https://huggingface.co/spaces/Queensly/FastAPI_in_Docker
from fastapi import FastAPI, Body
import uvicorn
import json
from PIL import Image
import time
from constants import DESCRIPTION, LOGO
from model import get_pipeline
from utils import replace_background
from diffusers.utils import load_image
import base64
import io

app = FastAPI()
pipeline = get_pipeline()


# Endpoints
# Root endpoint
@app.get("/")
def root():
    return {"API": "Sum of 2 Squares"}


@app.post("/img2img")
async def img2img(prompt: str = Body(...), imgbase64data: str = Body(...)):
    MAX_QUEUE_SIZE = 4
    start = time.time()
    print("params:", imgbase64data, prompt)

    # Decode the base64-encoded input image and resize it to a 512px width,
    # keeping the aspect ratio.
    image_data = base64.b64decode(imgbase64data)
    image1 = Image.open(io.BytesIO(image_data))
    w, h = image1.size
    newW = 512
    newH = int(h * newW / w)
    img = image1.resize((newW, newH))
    end1 = time.time()
    print("image size:", img.size)
    print("pipeline load time:", end1 - start)

    # Run the img2img pipeline on the resized image.
    result = pipeline(
        prompt=prompt,
        image=img,
        strength=0.6,
        seed=10,
        width=256,
        height=256,
        guidance_scale=1,
        num_inference_steps=4,
    )
    output_image = result.images[0]
    end2 = time.time()
    print("result:", output_image)
    print("generation finished:", end2 - end1)

    # Serialize the generated image as PNG bytes before base64-encoding it,
    # so the client can decode the response back into an image file.
    buffer = io.BytesIO()
    output_image.save(buffer, format="PNG")
    output_image_base64 = base64.b64encode(buffer.getvalue()).decode()
    return output_image_base64


@app.post("/predict")
async def predict(prompt: str = Body(...)):
    return f"Hello, {prompt}"
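

# Local entrypoint (a minimal sketch; the host and port below are assumptions --
# on Hugging Face Spaces the server is typically started from the Dockerfile instead).
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)

# Example request against the /img2img endpoint (illustrative only; assumes the
# server above is running locally and that a file named `input.jpg` exists):
#
#   import base64, io, requests
#   from PIL import Image
#
#   with open("input.jpg", "rb") as f:
#       img_b64 = base64.b64encode(f.read()).decode()
#   resp = requests.post(
#       "http://localhost:7860/img2img",
#       json={"prompt": "a watercolor painting of a cat", "imgbase64data": img_b64},
#   )
#   Image.open(io.BytesIO(base64.b64decode(resp.json()))).save("output.png")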