hysts HF Staff committed on
Commit
e2b8df1
·
1 Parent(s): f0e8e9b
Files changed (1) hide show
  1. app.py +49 -9
app.py CHANGED
@@ -1,7 +1,6 @@
1
  #!/usr/bin/env python
2
 
3
  import os
4
- import random
5
 
6
  import gradio as gr
7
  import numpy as np
@@ -34,10 +33,22 @@ refiner = DiffusionPipeline.from_pretrained(
34
  ).to(device)
35
 
36
 
37
- def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
38
- if randomize_seed:
39
- seed = random.randint(0, MAX_SEED) # noqa: S311
40
- return seed
 
 
 
 
 
 
 
 
 
 
 
 
41
 
42
 
43
  @spaces.GPU
@@ -59,6 +70,36 @@ def generate(
59
  apply_refiner: bool = False,
60
  progress: gr.Progress = gr.Progress(track_tqdm=True), # noqa: ARG001, B008
61
  ) -> PIL.Image.Image:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
  generator = torch.Generator().manual_seed(seed)
63
 
64
  if not use_negative_prompt:
@@ -249,11 +290,10 @@ with gr.Blocks(css_paths="style.css") as demo:
249
  prompt_2.submit,
250
  negative_prompt_2.submit,
251
  ],
252
- fn=randomize_seed_fn,
253
- inputs=[seed, randomize_seed],
254
  outputs=seed,
255
  queue=False,
256
- api_name=False,
257
  ).then(
258
  fn=generate,
259
  inputs=[
@@ -278,4 +318,4 @@ with gr.Blocks(css_paths="style.css") as demo:
278
  )
279
 
280
  if __name__ == "__main__":
281
- demo.launch()
 
1
  #!/usr/bin/env python
2
 
3
  import os
 
4
 
5
  import gradio as gr
6
  import numpy as np
 
33
  ).to(device)
34
 
35
 
36
def get_seed(randomize_seed: bool, seed: int) -> int:
    """Resolve the seed to use for downstream image generation.

    MAX_SEED is the largest 32-bit signed integer (np.iinfo(np.int32).max).
    The returned value drives the stochastic parts of model inference, so
    returning the caller's seed unchanged keeps runs reproducible.

    Args:
        randomize_seed (bool): When True, draw a fresh integer in
            [0, MAX_SEED) from NumPy's default random generator; when
            False, pass the given seed through untouched.
        seed (int): Seed to return when randomization is disabled.

    Returns:
        int: Either the randomly drawn seed or the original one.
    """
    if not randomize_seed:
        return seed
    # New Generator per call: no shared RNG state to reason about.
    return int(np.random.default_rng().integers(0, MAX_SEED))
52
 
53
 
54
  @spaces.GPU
 
70
  apply_refiner: bool = False,
71
  progress: gr.Progress = gr.Progress(track_tqdm=True), # noqa: ARG001, B008
72
  ) -> PIL.Image.Image:
73
+ """Generates an image from a text prompt using the SDXL (Stable Diffusion XL) model.
74
+
75
+ This function allows fine-grained control over image generation through prompts,
76
+ negative prompts, and optional refinement stages.
77
+
78
+ Note:
79
+ All prompt-related inputs (e.g., `prompt`, `negative_prompt`, `prompt_2`, and `negative_prompt_2`)
80
+ must be written in English for proper model performance.
81
+
82
+ Args:
83
+ prompt (str): Main text prompt used to guide image generation.
84
+ negative_prompt (str, optional): Text specifying elements to exclude from the image.
85
+ prompt_2 (str, optional): Secondary prompt for additional guidance. Used only if `use_prompt_2` is True.
86
+ negative_prompt_2 (str, optional): Secondary negative prompt. Used only if `use_negative_prompt_2` is True.
87
+ use_negative_prompt (bool, optional): Whether to apply `negative_prompt` during generation.
88
+ use_prompt_2 (bool, optional): Whether to apply `prompt_2` during generation.
89
+ use_negative_prompt_2 (bool, optional): Whether to apply `negative_prompt_2` during generation.
90
+ seed (int, optional): Seed for random number generation. Use 0 to generate a random seed.
91
+ width (int, optional): Width of the output image in pixels.
92
+ height (int, optional): Height of the output image in pixels.
93
+ guidance_scale_base (float, optional): Guidance scale for the base model. Higher values follow the prompt more closely.
94
+ guidance_scale_refiner (float, optional): Guidance scale for the refiner model.
95
+ num_inference_steps_base (int, optional): Number of inference steps for the base model.
96
+ num_inference_steps_refiner (int, optional): Number of inference steps for the refiner model.
97
+ apply_refiner (bool, optional): Whether to apply the refiner stage after the base image is generated.
98
+ progress (gr.Progress, optional): Gradio progress object to show progress during generation.
99
+
100
+ Returns:
101
+ PIL.Image.Image: The generated image as a PIL Image object.
102
+ """
103
  generator = torch.Generator().manual_seed(seed)
104
 
105
  if not use_negative_prompt:
 
290
  prompt_2.submit,
291
  negative_prompt_2.submit,
292
  ],
293
+ fn=get_seed,
294
+ inputs=[randomize_seed, seed],
295
  outputs=seed,
296
  queue=False,
 
297
  ).then(
298
  fn=generate,
299
  inputs=[
 
318
  )
319
 
320
  if __name__ == "__main__":
321
+ demo.launch(mcp_server=True)