# https://medium.com/@qacheampong/building-and-deploying-a-fastapi-app-with-hugging-face-9210e9b4a713
# https://huggingface.co/spaces/Queensly/FastAPI_in_Docker
from fastapi import FastAPI, Body
import uvicorn
import json
from PIL import Image
import time
from constants import DESCRIPTION, LOGO
from model import get_pipeline
from utils import replace_background
from diffusers.utils import load_image
import base64
import io

# Create the app and load the diffusion pipeline once at startup so every
# request reuses the same pipeline instance
app = FastAPI()
pipeline = get_pipeline()


# Endpoints
# Root endpoint
@app.get("/")
def root():
    return {"API": "Sum of 2 Squares"}
@app.post("/img2img")
async def predict(prompt=Body(...),imgbase64data=Body(...)):
MAX_QUEUE_SIZE = 4
start = time.time()
print("参数",imgbase64data,prompt)
image_data = base64.b64decode(imgbase64data)
image1 = Image.open(io.BytesIO(image_data))
w, h = image1.size
newW = 512
newH = int(h * newW / w)
img = image1.resize((newW, newH))
end1 = time.time()
print("图像:", img.size)
print("加载管道:", end1 - start)
result = pipeline(
prompt=prompt,
image=img,
strength=0.6,
seed=10,
width=256,
height=256,
guidance_scale=1,
num_inference_steps=4,
)
output_image = result.images[0]
end2 = time.time()
print("测试",output_image)
print("s生成完成:", end2 - end1)
# 将图片对象转换为bytes
output_image_base64 = base64.b64encode(output_image.tobytes()).decode()
return output_image_base64
@app.post("/predict")
async def predict(prompt=Body(...)):
return f"您好,{prompt}"
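

# A minimal local entry point, included as a sketch: it assumes the Space's
# Dockerfile does not already launch the server itself. Hugging Face Spaces
# Docker apps conventionally expose port 7860, but the host/port values here
# are assumptions, not taken from this repository's configuration.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)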