nftnik committed
Commit 34b406c (verified)
Parent(s): 85e3861

Update app.py

Files changed (1)
  1. app.py +161 -12
app.py CHANGED
@@ -82,31 +82,180 @@ with torch.inference_mode():
     empty_latent = EmptyLatentImage()
 
 @spaces.GPU
-def generate_image(prompt, input_image, strength, progress=gr.Progress(track_tqdm=True)):
+def generate_image(prompt, input_image, lora_weight, guidance, downsampling_factor, weight, seed, width, height, batch_size, steps, progress=gr.Progress(track_tqdm=True)):
     try:
         with torch.inference_mode():
-            # Your generation code here
-            pass
+            # Encode the text prompt
+            cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
+            encoded_text = cliptextencode.encode(
+                text=prompt,
+                clip=dualcliploader_357[0]
+            )
+
+            # Load and process the input image
+            loadimage = NODE_CLASS_MAPPINGS["LoadImage"]()
+            loaded_image = loadimage.load_image(image=input_image)
+
+            # Flux Guidance
+            fluxguidance = NODE_CLASS_MAPPINGS["FluxGuidance"]()
+            flux_guidance = fluxguidance.append(
+                guidance=guidance,
+                conditioning=encoded_text[0]
+            )
+
+            # Load the LoRA
+            loraloadermodelonly = NODE_CLASS_MAPPINGS["LoraLoaderModelOnly"]()
+            lora_model = loraloadermodelonly.load_lora_model_only(
+                lora_name="models/lora/NFTNIK_FLUX.1[dev]_LoRA.safetensors",
+                strength_model=lora_weight,
+                model=stylemodelloader_441[0]
+            )
+
+            # Redux Advanced
+            reduxadvanced = NODE_CLASS_MAPPINGS["ReduxAdvanced"]()
+            redux_result = reduxadvanced.apply_stylemodel(
+                downsampling_factor=downsampling_factor,
+                downsampling_function="area",
+                mode="keep aspect ratio",
+                weight=weight,
+                conditioning=flux_guidance[0],
+                style_model=stylemodelloader_441[0],
+                image=loaded_image[0]
+            )
+
+            # Empty latent
+            emptylatentimage = NODE_CLASS_MAPPINGS["EmptyLatentImage"]()
+            empty_latent = emptylatentimage.generate(
+                width=width,
+                height=height,
+                batch_size=batch_size
+            )
+
+            # KSampler
+            ksampler = NODE_CLASS_MAPPINGS["KSampler"]()
+            sampled = ksampler.sample(
+                seed=seed,
+                steps=steps,
+                cfg=1,
+                sampler_name="euler",
+                scheduler="simple",
+                denoise=1,
+                model=lora_model[0],
+                positive=redux_result[0],
+                negative=flux_guidance[0],
+                latent_image=empty_latent[0]
+            )
+
+            # Decode with the VAE
+            vaedecode = NODE_CLASS_MAPPINGS["VAEDecode"]()
+            decoded = vaedecode.decode(
+                samples=sampled[0],
+                vae=vaeloader_359[0]
+            )
+
+            # Save the image
+            temp_filename = f"Flux_{random.randint(0, 99999)}.png"
+            temp_path = os.path.join(output_dir, temp_filename)
+            Image.fromarray((decoded[0] * 255).astype("uint8")).save(temp_path)
+
+            return temp_path
     except Exception as e:
         print(f"Error generating image: {str(e)}")
         return None
 
 # Gradio interface
 with gr.Blocks() as app:
-    gr.Markdown("# FLUX Image Generator")
+    gr.Markdown("# FLUX Redux Image Generator")
+
     with gr.Row():
         with gr.Column():
-            prompt_input = gr.Textbox(label="Prompt", placeholder="Enter your prompt here...", lines=5)
-            input_image = gr.Image(label="Input Image", type="filepath")
-            strength = gr.Slider(minimum=0, maximum=2, step=0.1, value=1.0, label="Strength")
-            generate_btn = gr.Button("Generate Image")
-
+            prompt_input = gr.Textbox(
+                label="Prompt",
+                placeholder="Enter your prompt here...",
+                lines=5
+            )
+            input_image = gr.Image(
+                label="Input Image",
+                type="filepath"
+            )
+
+            with gr.Row():
+                with gr.Column():
+                    lora_weight = gr.Slider(
+                        minimum=0,
+                        maximum=2,
+                        step=0.1,
+                        value=0.6,
+                        label="LoRA Weight"
+                    )
+                    guidance = gr.Slider(
+                        minimum=0,
+                        maximum=20,
+                        step=0.1,
+                        value=3.5,
+                        label="Guidance"
+                    )
+                    downsampling_factor = gr.Slider(
+                        minimum=1,
+                        maximum=8,
+                        step=1,
+                        value=3,
+                        label="Downsampling Factor"
+                    )
+                    weight = gr.Slider(
+                        minimum=0,
+                        maximum=2,
+                        step=0.1,
+                        value=1.0,
+                        label="Model Weight"
+                    )
+                with gr.Column():
+                    seed = gr.Number(
+                        value=random.randint(1, 2**64),
+                        label="Seed",
+                        precision=0
+                    )
+                    width = gr.Number(
+                        value=1024,
+                        label="Width",
+                        precision=0
+                    )
+                    height = gr.Number(
+                        value=1024,
+                        label="Height",
+                        precision=0
+                    )
+                    batch_size = gr.Number(
+                        value=1,
+                        label="Batch Size",
+                        precision=0
+                    )
+                    steps = gr.Number(
+                        value=20,
+                        label="Steps",
+                        precision=0
+                    )
+
+            generate_btn = gr.Button("Generate Image")
+
         with gr.Column():
-            output_image = gr.Image(label="Generated Image", type="filepath")
-
+            output_image = gr.Image(label="Generated Image", type="pil")
+
     generate_btn.click(
         fn=generate_image,
         outputs=[output_image]
-        inputs=[prompt_input, input_image, strength],
+        inputs=[
+            prompt_input,
+            input_image,
+            lora_weight,
+            guidance,
+            downsampling_factor,
+            weight,
+            seed,
+            width,
+            height,
+            batch_size,
+            steps
+        ],
         outputs=[output_image]
     )
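
Note: the save step in this commit passes decoded[0] straight to Image.fromarray with a NumPy-style .astype call, while ComfyUI's VAEDecode node normally returns a float torch tensor shaped [batch, height, width, channels] with values in [0, 1]. Below is a minimal sketch, not part of the commit, of the conversion that step appears to rely on; the helper name tensor_to_pil is purely illustrative.

# Minimal sketch (assumption): convert a ComfyUI VAEDecode output to a PIL image.
# Assumes image_tensor is a float torch tensor shaped [batch, height, width, channels]
# with values in [0, 1].
import numpy as np
import torch
from PIL import Image

def tensor_to_pil(image_tensor: torch.Tensor) -> Image.Image:
    # Take the first image in the batch and move it to CPU memory.
    array = image_tensor[0].detach().cpu().numpy()
    # Scale [0, 1] floats to [0, 255] uint8, clipping out-of-range values.
    array = np.clip(array * 255.0, 0.0, 255.0).astype("uint8")
    return Image.fromarray(array)

Under those assumptions, the save line would read tensor_to_pil(decoded[0]).save(temp_path).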