File size: 1,537 Bytes
4690160
be15aa1
4690160
 
 
 
 
 
 
6dfd733
4690160
6dfd733
d5d120d
6dfd733
4690160
 
 
 
 
 
6dfd733
4690160
 
 
 
6dfd733
4690160
 
 
 
 
 
 
 
 
 
 
6dfd733
4690160
 
 
 
 
 
 
 
 
 
 
 
 
d5d120d
 
4690160
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
import spaces
import os
import torch
import gradio as gr
from fastapi import FastAPI
from huggingface_hub import login
from diffusers import StableDiffusion3Pipeline, DDPMScheduler
from dotenv import load_dotenv
import uvicorn

# Authenticate with the Hugging Face Hub so the gated SD3 weights can be
# downloaded. NOTE(review): os.getenv returns None when HF_TOKEN is unset and
# login(token=None) then falls back to cached credentials; also load_dotenv is
# imported above but never called, so a .env file would not be read — confirm
# the token is expected to come from the process environment only.
login(token=os.getenv("HF_TOKEN"))

# FastAPI application serving the HTTP routes below.
# NOTE(review): debug=True exposes tracebacks to clients — confirm this is not
# meant for production.
app = FastAPI(debug=True)

# Load Stable Diffusion 3 Medium in half precision at import time (downloads
# the weights on first run). revision pins a specific PR ref of the repo
# ("refs/pr/26") rather than the main branch — presumably deliberate; verify
# it is still needed.
pipeline = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium",
    revision="refs/pr/26",
    torch_dtype=torch.float16,
)
pipeline.to("cuda")  # requires a CUDA GPU; fails at import on CPU-only hosts

@app.get("/")
def index():
    """Liveness-check route: log a marker and reply with a fixed greeting."""
    marker = 'here'
    print(marker)
    return "Hello"

@spaces.GPU
def generate(prompt, negative_prompt, num_inference_steps, height, width, guidance_scale):
    """Run the SD3 pipeline and return the generated PIL images.

    Args:
        prompt: Positive text prompt.
        negative_prompt: Text to steer generation away from.
        num_inference_steps: Denoising step count (widget may deliver a float).
        height, width: Output dimensions in pixels (gr.Number delivers floats).
        guidance_scale: Classifier-free guidance strength.

    Returns:
        list of PIL.Image from the pipeline's ``images`` attribute, suitable
        for the gr.Gallery output.
    """
    print('start generate', prompt, negative_prompt, num_inference_steps, height, width, guidance_scale)
    # Gradio Number/Slider components deliver floats over the wire; diffusers
    # requires integers for the step count and pixel dimensions, so cast
    # explicitly to avoid a TypeError / silent misbehavior at inference time.
    return pipeline(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=int(num_inference_steps),
        height=int(height),
        width=int(width),
        guidance_scale=guidance_scale,
    ).images

# Gradio form wired to generate(); field order matches the function signature.
_ui_inputs = [
    gr.Textbox(label="Prompt", lines=3),
    gr.Textbox(label="Negative Prompt", lines=2),
    gr.Slider(label="Inference Steps", value=20, minimum=1, maximum=30, step=1),
    gr.Number(label="Height"),
    gr.Number(label="Width"),
    gr.Slider(label="Guidance Scale", value=7, minimum=1, maximum=15, step=1),
]
io = gr.Interface(fn=generate, inputs=_ui_inputs, outputs=gr.Gallery())

# NOTE(review): while this line stays commented out, the Gradio UI built above
# (`io`) is never mounted — only the bare FastAPI routes are served. Confirm
# whether the UI should be re-enabled at /gradio.
#app = gr.mount_gradio_app(app, io, path="/gradio")
print('starting')
if __name__ == "__main__":
    # Hugging Face Spaces (and Gradio convention) expose port 7860; the
    # original 7680 was a transposition typo that left the app unreachable.
    uvicorn.run(app, host="0.0.0.0", port=7860)