SpyC0der77 committed · verified
Commit a52d3e7 · 1 Parent(s): 6d9e471

Update app.py

Files changed (1): app.py (+30, -42)
app.py CHANGED
@@ -1,29 +1,18 @@
 import gradio as gr
 import numpy as np
 import random
-
-
-# import spaces #[uncomment to use ZeroGPU]
 from diffusers import DiffusionPipeline
 import torch
 
-device = "cuda" if torch.cuda.is_available() else "cpu"
-model_repo_id = "stabilityai/sdxl-turbo"  # Replace to the model you would like to use
-
-if torch.cuda.is_available():
-    torch_dtype = torch.float16
-else:
-    torch_dtype = torch.float32
-
+# Load the pipeline
 pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev")
 pipe.load_lora_weights("EvanZhouDev/open-genmoji")
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 pipe = pipe.to(device)
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
 
-
-# @spaces.GPU #[uncomment to use ZeroGPU]
 def infer(
     prompt,
     negative_prompt,
@@ -33,25 +22,24 @@ def infer(
     height,
     guidance_scale,
     num_inference_steps,
-    progress=gr.Progress(track_tqdm=True),
 ):
+    # Handle seed randomization
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
+    generator = torch.manual_seed(seed)
 
-    generator = torch.Generator().manual_seed(seed)
-
-    image = pipe(
+    # Generate the image using the pipeline
+    result = pipe(
         prompt=prompt,
-        negative_prompt=negative_prompt,
-        guidance_scale=guidance_scale,
-        num_inference_steps=num_inference_steps,
+        negative_prompt=negative_prompt if negative_prompt else None,
         width=width,
         height=height,
+        guidance_scale=guidance_scale,
+        num_inference_steps=num_inference_steps,
         generator=generator,
     ).images[0]
 
-    return image, seed
-
+    return result, seed
 
 examples = [
     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
@@ -88,7 +76,7 @@ with gr.Blocks(css=css) as demo:
                 label="Negative prompt",
                 max_lines=1,
                 placeholder="Enter a negative prompt",
-                visible=False,
+                visible=True,
             )
 
             seed = gr.Slider(
@@ -107,7 +95,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=256,
                     maximum=MAX_IMAGE_SIZE,
                     step=32,
-                    value=1024,  # Replace with defaults that work for your model
+                    value=512,  # Default width for your model
                 )
 
                 height = gr.Slider(
@@ -115,7 +103,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=256,
                     maximum=MAX_IMAGE_SIZE,
                     step=32,
-                    value=1024,  # Replace with defaults that work for your model
+                    value=512,  # Default height for your model
                 )
 
             with gr.Row():
@@ -124,7 +112,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=0.0,
                     maximum=10.0,
                     step=0.1,
-                    value=0.0,  # Replace with defaults that work for your model
+                    value=7.5,  # Default guidance scale for your model
                 )
 
                 num_inference_steps = gr.Slider(
@@ -132,25 +120,25 @@ with gr.Blocks(css=css) as demo:
                     minimum=1,
                     maximum=50,
                     step=1,
-                    value=2,  # Replace with defaults that work for your model
+                    value=25,  # Default number of inference steps for your model
                 )
 
         gr.Examples(examples=examples, inputs=[prompt])
-    gr.on(
-        triggers=[run_button.click, prompt.submit],
-        fn=infer,
-        inputs=[
-            prompt,
-            negative_prompt,
-            seed,
-            randomize_seed,
-            width,
-            height,
-            guidance_scale,
-            num_inference_steps,
-        ],
-        outputs=[result, seed],
-    )
+
+    run_button.click(
+        infer,
+        inputs=[
+            prompt,
+            negative_prompt,
+            seed,
+            randomize_seed,
+            width,
+            height,
+            guidance_scale,
+            num_inference_steps,
+        ],
+        outputs=[result, seed],
+    )
 
 if __name__ == "__main__":
     demo.launch()
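
For quick local testing, a minimal standalone sketch of the generation path introduced in this commit is shown below. It is not part of the committed app.py: it assumes torch and diffusers are installed and that the FLUX.1-dev weights are accessible, reuses the same model and LoRA repos, mirrors the new slider defaults (512x512, guidance 7.5, 25 steps), and the prompt, seed, and output filename are placeholders.

# Standalone sketch (not part of this commit): exercises the same pipeline
# setup and call pattern as the updated app.py.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev")
pipe.load_lora_weights("EvanZhouDev/open-genmoji")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
pipe = pipe.to(device)

generator = torch.manual_seed(42)  # fixed seed instead of the UI's randomize option
image = pipe(
    prompt="smiling emoji with sunglasses",  # placeholder prompt
    negative_prompt=None,                    # empty negative prompt passed as None, as in infer()
    width=512,
    height=512,
    guidance_scale=7.5,
    num_inference_steps=25,
    generator=generator,
).images[0]
image.save("output.png")  # placeholder output path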