Update app.py
app.py CHANGED

@@ -7,6 +7,7 @@ from diffusers import DiffusionPipeline
 import io
 import base64
 from PIL import Image
+import json

 dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"

@@ -40,6 +41,11 @@ def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_in
     # Return JSON with the Base64 image and the seed
     return {"image_base64": f"data:image/png;base64,{img_str}", "seed": seed}

+# Function for the custom API
+def api_infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4):
+    result = infer(prompt, seed, randomize_seed, width, height, num_inference_steps)
+    return result  # Return the JSON directly
+
 examples = [
     "a tiny astronaut hatching from an egg on the moon",
     "a cat holding a sign that says hello world",

@@ -119,6 +125,7 @@ with gr.Blocks(css=css) as demo:
         output = infer(prompt, seed, randomize_seed, width, height, num_inference_steps)
         return output["image_base64"], output["seed"]

+    # Gradio interface
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=format_output,

@@ -126,4 +133,7 @@ with gr.Blocks(css=css) as demo:
        outputs=[result, seed_output]
    )

+# Custom endpoint for the API
+demo.queue(api_name="infer_api").launch()
+
 demo.launch()
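A note on the last two hunks: in the Gradio versions I'm aware of, Blocks.queue() does not take an api_name argument, and calling demo.launch() after demo.queue(...).launch() would try to launch the app a second time; named endpoints are normally declared by passing api_name to an event listener such as the gr.on(...) call shown above. The sketch below is a minimal, hypothetical client for the intended endpoint. It assumes the endpoint really does end up registered as /infer_api on the Space, and "user/space-name" is a placeholder Space id, not something this commit defines.

# Hypothetical client sketch for the intended /infer_api endpoint.
# Assumes the endpoint is registered under that name and that
# "user/space-name" is replaced with the real Space id.
from gradio_client import Client

client = Client("user/space-name")  # placeholder Space id
output = client.predict(
    "a cat holding a sign that says hello world",  # prompt
    42,      # seed
    False,   # randomize_seed
    1024,    # width
    1024,    # height
    4,       # num_inference_steps
    api_name="/infer_api",
)
print(output)  # intended to carry the {"image_base64": ..., "seed": ...} payload

The positional arguments mirror api_infer's signature; whether the raw dict comes back unchanged depends on how the endpoint's outputs are declared on the server side.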