ovi054 committed on
Commit 753f6ea · verified · 1 Parent(s): 2ef175c

Update app.py

Files changed (1): app.py +1 -126
app.py CHANGED

@@ -7,7 +7,6 @@ from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, Autoe
 from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
 from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
 
-
 dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
@@ -21,7 +20,6 @@ MAX_IMAGE_SIZE = 2048
 
 pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
 
-
 @spaces.GPU()
 def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28, lora_id=None, lora_scale=0.95, progress=gr.Progress(track_tqdm=True)):
     if randomize_seed:
@@ -68,31 +66,10 @@ def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidan
     # Unload LoRA weights if they were loaded
     if lora_id:
         pipe.unload_lora_weights()
+
 
 
 
-examples = [
-    "a tiny astronaut hatching from an egg on the moon",
-    "a cat holding a sign that says hello world",
-    "an anime illustration of a wiener schnitzel",
-]
-
-css = """
-#col-container {
-    margin: 0 auto;
-    max-width: 960px;
-}
-.generate-btn {
-    background: linear-gradient(90deg, #4B79A1 0%, #283E51 100%) !important;
-    border: none !important;
-    color: white !important;
-}
-.generate-btn:hover {
-    transform: translateY(-2px);
-    box-shadow: 0 5px 15px rgba(0,0,0,0.2);
-}
-"""
-
 # with gr.Blocks(css=css) as app:
 # gr.HTML("<center><h1>FLUX.1-Dev with LoRA support</h1></center>")
 # with gr.Column(elem_id="col-container"):
@@ -147,105 +124,3 @@ css = """
 # # text_button.click(infer, inputs=[text_prompt, seed, randomize_seed, width, height, cfg, steps, custom_lora, lora_scale], outputs=[image_output,seed_output, seed])
 
 # app.launch(share=True)
-
-
-with gr.Blocks(css=css) as demo:
-
-    with gr.Column(elem_id="col-container"):
-        gr.Markdown(f"""# FLUX.1 [dev] LoRA
-12B param rectified flow transformer guidance-distilled from [FLUX.1 [pro]](https://blackforestlabs.ai/)
-[[non-commercial license](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)] [[blog](https://blackforestlabs.ai/announcing-black-forest-labs/)] [[model](https://huggingface.co/black-forest-labs/FLUX.1-dev)]
-""")
-
-        with gr.Row():
-
-            prompt = gr.Text(
-                label="Prompt",
-                show_label=False,
-                max_lines=1,
-                placeholder="Enter your prompt",
-                container=False,
-            )
-
-            run_button = gr.Button("Run", scale=0)
-
-        result = gr.Image(label="Result", show_label=False)
-
-        with gr.Accordion("Advanced Settings", open=False):
-
-            seed = gr.Slider(
-                label="Seed",
-                minimum=0,
-                maximum=MAX_SEED,
-                step=1,
-                value=0,
-            )
-
-            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-            with gr.Row():
-
-                width = gr.Slider(
-                    label="Width",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=8,
-                    value=1024,
-                )
-
-                height = gr.Slider(
-                    label="Height",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=8,
-                    value=1024,
-                )
-
-            with gr.Row():
-
-                guidance_scale = gr.Slider(
-                    label="Guidance Scale",
-                    minimum=1,
-                    maximum=15,
-                    step=0.1,
-                    value=3.5,
-                )
-
-                num_inference_steps = gr.Slider(
-                    label="Number of inference steps",
-                    minimum=1,
-                    maximum=50,
-                    step=1,
-                    value=28,
-                )
-
-            with gr.Row():
-                lora_id = gr.Textbox(
-                    label="LoRA Model ID (HuggingFace path)",
-                    placeholder="username/lora-model",
-                    max_lines=1
-                )
-                lora_scale = gr.Slider(
-                    label="LoRA Scale",
-                    minimum=0,
-                    maximum=2,
-                    step=0.01,
-                    value=0.95,
-                )
-
-        gr.Examples(
-            examples = examples,
-            fn = infer,
-            inputs = [prompt],
-            outputs = [result, seed],
-            cache_examples="lazy"
-        )
-
-    gr.on(
-        triggers=[run_button.click, prompt.submit],
-        fn = infer,
-        inputs = [prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,lora_id,lora_scale],
-        outputs = [result, seed]
-    )
-
-demo.launch()
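A note on one context line kept by this diff: pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe) attaches the helper from live_preview_helpers to the pipeline object as a bound method, relying on the fact that plain Python functions implement the descriptor protocol. A minimal, self-contained sketch of the same trick follows; Pipeline and preview_call are hypothetical stand-ins for the DiffusionPipeline instance and the live-preview helper.

# Sketch of binding a free function as an instance method via the descriptor
# protocol. Pipeline and preview_call are hypothetical names, not from app.py.
class Pipeline:
    def __init__(self, name):
        self.name = name

def preview_call(self, prompt):
    # Once bound with __get__, self refers to the pipeline instance.
    return f"{self.name} generating: {prompt}"

pipe = Pipeline("flux-dev")
# Functions are descriptors; calling __get__ on one returns a bound method.
pipe.preview_call = preview_call.__get__(pipe)
print(pipe.preview_call("a tiny astronaut"))  # flux-dev generating: a tiny astronaut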
 
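The hunk at -68,31 keeps only the unload half of the per-request LoRA handling in infer; the matching load call and the seed logic sit outside the lines shown. Below is a hedged sketch of the overall pattern the function appears to follow, assuming the diffusers load_lora_weights/unload_lora_weights API on a Flux pipeline; the joint_attention_kwargs route for applying lora_scale and the body of the randomize branch are assumptions, not shown in the diff.

# Hedged sketch of per-request LoRA handling around a Flux pipeline call.
# Only pipe.unload_lora_weights() appears in the diff; the rest is assumed.
import random
import torch

def run_inference(pipe, prompt, seed=42, randomize_seed=False,
                  lora_id=None, lora_scale=0.95, max_seed=2**32 - 1):
    if randomize_seed:
        seed = random.randint(0, max_seed)  # presumed body of the randomize branch
    generator = torch.Generator().manual_seed(seed)
    if lora_id:
        pipe.load_lora_weights(lora_id)  # fetch adapter weights from the Hub repo
    try:
        image = pipe(
            prompt,
            generator=generator,
            # Flux pipelines scale LoRA layers via joint_attention_kwargs (assumption).
            joint_attention_kwargs={"scale": lora_scale} if lora_id else None,
        ).images[0]
    finally:
        if lora_id:
            pipe.unload_lora_weights()  # restore base weights for the next request
    return image, seed

Unloading in a finally block mirrors the comment kept in the diff ("Unload LoRA weights if they were loaded") and keeps a shared pipeline clean between requests even if generation raises.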