eder0782 committed on
Commit e450df1 (verified)
1 Parent(s): f611d13

Update app.py

Files changed (1)
  1. app.py +19 -26
app.py CHANGED
@@ -4,9 +4,9 @@ import random
 import spaces
 import torch
 from diffusers import DiffusionPipeline
-import os
-import uuid
-from datetime import datetime
+import io
+import base64
+from PIL import Image

 dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -16,10 +16,6 @@ pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", tor
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 2048

-# Directory to save images (use /tmp on Hugging Face Spaces)
-OUTPUT_DIR = "/tmp"
-os.makedirs(OUTPUT_DIR, exist_ok=True)
-
 @spaces.GPU()
 def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, progress=gr.Progress(track_tqdm=True)):
     if randomize_seed:
@@ -36,18 +32,13 @@ def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_in
         guidance_scale=0.0
     ).images[0]

-    # Generate a unique filename
-    filename = f"image_{uuid.uuid4().hex}.png"
-    filepath = os.path.join(OUTPUT_DIR, filename)
-
-    # Save the image
-    image.save(filepath)
-
-    # Build the public URL (adjust to your Space's URL)
-    space_url = "https://eder0782-flux-image-generator.hf.space"
-    image_url = f"{space_url}/file/{filename}"
+    # Convert the image to Base64
+    buffered = io.BytesIO()
+    image.save(buffered, format="PNG")
+    img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")

-    return {"image_url": image_url, "seed": seed}
+    # Return JSON with Base64 and seed
+    return {"image_base64": f"data:image/png;base64,{img_str}", "seed": seed}

 examples = [
     "a tiny astronaut hatching from an egg on the moon",
@@ -80,6 +71,7 @@ with gr.Blocks(css=css) as demo:
             run_button = gr.Button("Run", scale=0)

         result = gr.Image(label="Result", show_label=False)
+        seed_output = gr.Number(label="Seed", show_label=True)

         with gr.Accordion("Advanced Settings", open=False):
             seed = gr.Slider(
@@ -117,20 +109,21 @@ with gr.Blocks(css=css) as demo:
             examples=examples,
             fn=infer,
             inputs=[prompt],
-            outputs=[result, seed],
-            cache_examples="lazy"
+            outputs=[result, seed_output],
+            cache_examples=True,
+            cache_mode="lazy"
         )

-    # Adjust the output for the interface
-    def format_output(output):
-        return output["image_url"], output["seed"]
+    # Function to format the output for the interface
+    def format_output(prompt, seed, randomize_seed, width, height, num_inference_steps):
+        output = infer(prompt, seed, randomize_seed, width, height, num_inference_steps)
+        return output["image_base64"], output["seed"]

     gr.on(
         triggers=[run_button.click, prompt.submit],
-        fn=infer,
+        fn=format_output,
         inputs=[prompt, seed, randomize_seed, width, height, num_inference_steps],
-        outputs=[result, seed],
-        _js=format_output
+        outputs=[result, seed_output]
     )

 demo.launch()
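
The commit replaces the old /tmp file saving and /file/ URL scheme with an inline data URL: infer() now PNG-encodes the image with base64 and returns it as "data:image/png;base64,...". As a rough sketch of how a caller could consume that format, here is a minimal, self-contained round trip; the encode_data_url helper only mirrors what infer() does so the example runs on its own, and neither helper is part of app.py:

import base64
import io

from PIL import Image

def encode_data_url(image: Image.Image) -> str:
    # Mirror infer(): PNG-encode the image and wrap it in a data URL
    buffered = io.BytesIO()
    image.save(buffered, format="PNG")
    return "data:image/png;base64," + base64.b64encode(buffered.getvalue()).decode("utf-8")

def decode_data_url(data_url: str) -> Image.Image:
    # Strip the "data:image/png;base64," prefix and rebuild the image from the payload
    _, payload = data_url.split(",", 1)
    return Image.open(io.BytesIO(base64.b64decode(payload)))

# Round trip with a dummy 8x8 image standing in for the FLUX output
original = Image.new("RGB", (8, 8), color="red")
restored = decode_data_url(encode_data_url(original))
assert restored.size == original.size

A caller that receives the {"image_base64": ..., "seed": ...} dict can pass the image_base64 value through decode_data_url and then save or display the resulting PIL image.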
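
The UI wiring follows from the same change: because infer() now returns a dict, gr.on routes through format_output, which unpacks the dict into the two components (result and the new seed_output). Below is a minimal, hypothetical sketch of that unpacking pattern with a stub generator in place of the FLUX pipeline; fake_infer and the simplified layout are illustrative only and not part of app.py:

import gradio as gr
from PIL import Image

def fake_infer(prompt):
    # Stub standing in for the GPU-backed infer(); returns the same dict shape
    return {"image": Image.new("RGB", (64, 64), color="blue"), "seed": 42}

def format_output(prompt):
    # Unpack the dict so each Gradio component receives a single value
    out = fake_infer(prompt)
    return out["image"], out["seed"]

with gr.Blocks() as demo:
    prompt = gr.Text(label="Prompt")
    run_button = gr.Button("Run")
    result = gr.Image(label="Result")
    seed_output = gr.Number(label="Seed")
    run_button.click(fn=format_output, inputs=[prompt], outputs=[result, seed_output])

if __name__ == "__main__":
    demo.launch()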