Spanicin committed
Commit 6a500ed · verified · 1 Parent(s): 0d5c900

Update app.py

Files changed (1): app.py (+6, -472)
app.py CHANGED
@@ -1,469 +1,3 @@
- # # import logging
- # # import random
- # # import warnings
- # # import os
- # # import gradio as gr
- # # import numpy as np
- # # import spaces
- # # import torch
- # # from diffusers import FluxControlNetModel
- # # from diffusers.pipelines import FluxControlNetPipeline
- # # from gradio_imageslider import ImageSlider
- # # from PIL import Image
- # # from huggingface_hub import snapshot_download
-
- # # css = """
- # # #col-container {
- # #     margin: 0 auto;
- # #     max-width: 512px;
- # # }
- # # """
-
- # # if torch.cuda.is_available():
- # #     power_device = "GPU"
- # #     device = "cuda"
- # # else:
- # #     power_device = "CPU"
- # #     device = "cpu"
-
-
- # # huggingface_token = os.getenv("HUGGINFACE_TOKEN")
-
- # # model_path = snapshot_download(
- # #     repo_id="black-forest-labs/FLUX.1-dev",
- # #     repo_type="model",
- # #     ignore_patterns=["*.md", "*..gitattributes"],
- # #     local_dir="FLUX.1-dev",
- # #     token=huggingface_token, # type a new token-id.
- # # )
-
-
- # # # Load pipeline
- # # controlnet = FluxControlNetModel.from_pretrained(
- # #     "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
- # # ).to(device)
- # # pipe = FluxControlNetPipeline.from_pretrained(
- # #     model_path, controlnet=controlnet, torch_dtype=torch.bfloat16
- # # )
- # # pipe.to(device)
-
- # # MAX_SEED = 1000000
- # # MAX_PIXEL_BUDGET = 1024 * 1024
-
-
- # # def process_input(input_image, upscale_factor, **kwargs):
- # #     w, h = input_image.size
- # #     w_original, h_original = w, h
- # #     aspect_ratio = w / h
-
- # #     was_resized = False
-
- # #     if w * h * upscale_factor**2 > MAX_PIXEL_BUDGET:
- # #         warnings.warn(
- # #             f"Requested output image is too large ({w * upscale_factor}x{h * upscale_factor}). Resizing to ({int(aspect_ratio * MAX_PIXEL_BUDGET ** 0.5 // upscale_factor), int(MAX_PIXEL_BUDGET ** 0.5 // aspect_ratio // upscale_factor)}) pixels."
- # #         )
- # #         gr.Info(
- # #             f"Requested output image is too large ({w * upscale_factor}x{h * upscale_factor}). Resizing input to ({int(aspect_ratio * MAX_PIXEL_BUDGET ** 0.5 // upscale_factor), int(MAX_PIXEL_BUDGET ** 0.5 // aspect_ratio // upscale_factor)}) pixels budget."
- # #         )
- # #         input_image = input_image.resize(
- # #             (
- # #                 int(aspect_ratio * MAX_PIXEL_BUDGET**0.5 // upscale_factor),
- # #                 int(MAX_PIXEL_BUDGET**0.5 // aspect_ratio // upscale_factor),
- # #             )
- # #         )
- # #         was_resized = True
-
- # #     # resize to multiple of 8
- # #     w, h = input_image.size
- # #     w = w - w % 8
- # #     h = h - h % 8
-
- # #     return input_image.resize((w, h)), w_original, h_original, was_resized
-
-
- # # @spaces.GPU#(duration=42)
- # # def infer(
- # #     seed,
- # #     randomize_seed,
- # #     input_image,
- # #     num_inference_steps,
- # #     upscale_factor,
- # #     controlnet_conditioning_scale,
- # #     progress=gr.Progress(track_tqdm=True),
- # # ):
- # #     if randomize_seed:
- # #         seed = random.randint(0, MAX_SEED)
- # #     true_input_image = input_image
- # #     input_image, w_original, h_original, was_resized = process_input(
- # #         input_image, upscale_factor
- # #     )
-
- # #     # rescale with upscale factor
- # #     w, h = input_image.size
- # #     control_image = input_image.resize((w * upscale_factor, h * upscale_factor))
-
- # #     generator = torch.Generator().manual_seed(seed)
-
- # #     gr.Info("Upscaling image...")
- # #     image = pipe(
- # #         prompt="",
- # #         control_image=control_image,
- # #         controlnet_conditioning_scale=controlnet_conditioning_scale,
- # #         num_inference_steps=num_inference_steps,
- # #         guidance_scale=3.5,
- # #         height=control_image.size[1],
- # #         width=control_image.size[0],
- # #         generator=generator,
- # #     ).images[0]
-
- # #     if was_resized:
- # #         gr.Info(
- # #             f"Resizing output image to targeted {w_original * upscale_factor}x{h_original * upscale_factor} size."
- # #         )
-
- # #     # resize to target desired size
- # #     image = image.resize((w_original * upscale_factor, h_original * upscale_factor))
- # #     image.save("output.jpg")
- # #     # convert to numpy
- # #     return [true_input_image, image, seed]
-
-
- # # with gr.Blocks(css=css) as demo:
- # #     # with gr.Column(elem_id="col-container"):
- # #     gr.Markdown(
- # #         f"""
- # # # ⚡ Flux.1-dev Upscaler ControlNet ⚡
- # # This is an interactive demo of [Flux.1-dev Upscaler ControlNet](https://huggingface.co/jasperai/Flux.1-dev-Controlnet-Upscaler) taking as input a low resolution image to generate a high resolution image.
- # # Currently running on {power_device}.
-
- # # *Note*: Even though the model can handle higher resolution images, due to GPU memory constraints, this demo was limited to a generated output not exceeding a pixel budget of 1024x1024. If the requested size exceeds that limit, the input will be first resized keeping the aspect ratio such that the output of the controlNet model does not exceed the allocated pixel budget. The output is then resized to the targeted shape using a simple resizing. This may explain some artifacts for high resolution input. To adress this, run the demo locally or consider implementing a tiling strategy. Happy upscaling! 🚀
- # # """
- # #     )
-
- # #     with gr.Row():
- # #         run_button = gr.Button(value="Run")
-
- # #     with gr.Row():
- # #         with gr.Column(scale=4):
- # #             input_im = gr.Image(label="Input Image", type="pil")
- # #         with gr.Column(scale=1):
- # #             num_inference_steps = gr.Slider(
- # #                 label="Number of Inference Steps",
- # #                 minimum=8,
- # #                 maximum=50,
- # #                 step=1,
- # #                 value=28,
- # #             )
- # #             upscale_factor = gr.Slider(
- # #                 label="Upscale Factor",
- # #                 minimum=1,
- # #                 maximum=4,
- # #                 step=1,
- # #                 value=4,
- # #             )
- # #             controlnet_conditioning_scale = gr.Slider(
- # #                 label="Controlnet Conditioning Scale",
- # #                 minimum=0.1,
- # #                 maximum=1.5,
- # #                 step=0.1,
- # #                 value=0.6,
- # #             )
- # #             seed = gr.Slider(
- # #                 label="Seed",
- # #                 minimum=0,
- # #                 maximum=MAX_SEED,
- # #                 step=1,
- # #                 value=42,
- # #             )
-
- # #             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
- # #     with gr.Row():
- # #         result = ImageSlider(label="Input / Output", type="pil", interactive=True)
-
- # #     examples = gr.Examples(
- # #         examples=[
- # #             # [42, False, "examples/image_1.jpg", 28, 4, 0.6],
- # #             [42, False, "examples/image_2.jpg", 28, 4, 0.6],
- # #             # [42, False, "examples/image_3.jpg", 28, 4, 0.6],
- # #             [42, False, "examples/image_4.jpg", 28, 4, 0.6],
- # #             # [42, False, "examples/image_5.jpg", 28, 4, 0.6],
- # #             # [42, False, "examples/image_6.jpg", 28, 4, 0.6],
- # #         ],
- # #         inputs=[
- # #             seed,
- # #             randomize_seed,
- # #             input_im,
- # #             num_inference_steps,
- # #             upscale_factor,
- # #             controlnet_conditioning_scale,
- # #         ],
- # #         fn=infer,
- # #         outputs=result,
- # #         cache_examples="lazy",
- # #     )
-
- # #     # examples = gr.Examples(
- # #     #     examples=[
- # #     #         #[42, False, "examples/image_1.jpg", 28, 4, 0.6],
- # #     #         [42, False, "examples/image_2.jpg", 28, 4, 0.6],
- # #     #         #[42, False, "examples/image_3.jpg", 28, 4, 0.6],
- # #     #         #[42, False, "examples/image_4.jpg", 28, 4, 0.6],
- # #     #         [42, False, "examples/image_5.jpg", 28, 4, 0.6],
- # #     #         [42, False, "examples/image_6.jpg", 28, 4, 0.6],
- # #     #         [42, False, "examples/image_7.jpg", 28, 4, 0.6],
- # #     #     ],
- # #     #     inputs=[
- # #     #         seed,
- # #     #         randomize_seed,
- # #     #         input_im,
- # #     #         num_inference_steps,
- # #     #         upscale_factor,
- # #     #         controlnet_conditioning_scale,
- # #     #     ],
- # #     # )
-
- # #     gr.Markdown("**Disclaimer:**")
- # #     gr.Markdown(
- # #         "This demo is only for research purpose. Jasper cannot be held responsible for the generation of NSFW (Not Safe For Work) content through the use of this demo. Users are solely responsible for any content they create, and it is their obligation to ensure that it adheres to appropriate and ethical standards. Jasper provides the tools, but the responsibility for their use lies with the individual user."
- # #     )
- # #     gr.on(
- # #         [run_button.click],
- # #         fn=infer,
- # #         inputs=[
- # #             seed,
- # #             randomize_seed,
- # #             input_im,
- # #             num_inference_steps,
- # #             upscale_factor,
- # #             controlnet_conditioning_scale,
- # #         ],
- # #         outputs=result,
- # #         show_api=False,
- # #         # show_progress="minimal",
- # #     )
-
- # # demo.queue().launch(share=False, show_api=False)
-
-
-
-
-
-
- # import logging
- # import random
- # import warnings
- # import os,shutil,subprocess
- # import torch
- # import numpy as np
- # from diffusers import FluxControlNetModel
- # from diffusers.pipelines import FluxControlNetPipeline
- # from PIL import Image
- # from huggingface_hub import snapshot_download,login
- # import io
- # import base64
- # from flask import Flask, request, jsonify
- # from concurrent.futures import ThreadPoolExecutor
- # from flask_cors import CORS
- # from tqdm import tqdm
-
- # app = Flask(__name__)
- # CORS(app)
-
- # # Function to check disk usage
- # def check_disk_space():
- #     result = subprocess.run(['df', '-h'], capture_output=True, text=True)
- #     print(result.stdout)
-
- # # Function to clear Hugging Face cache
- # def clear_huggingface_cache():
- #     cache_dir = os.path.expanduser('~/.cache/huggingface')
- #     if os.path.exists(cache_dir):
- #         shutil.rmtree(cache_dir) # Removes the entire cache directory
- #         print(f"Cleared Hugging Face cache at: {cache_dir}")
- #     else:
- #         print("No Hugging Face cache found.")
-
- # # Check disk space
- # check_disk_space()
-
- # # Clear Hugging Face cache
- # clear_huggingface_cache()
-
- # # Add config to store base64 images
- # app.config['image_outputs'] = {}
-
- # # ThreadPoolExecutor for managing image processing threads
- # executor = ThreadPoolExecutor()
-
- # # Determine the device (GPU or CPU)
- # if torch.cuda.is_available():
- #     device = "cuda"
- # else:
- #     device = "cpu"
-
- # # Load model from Huggingface Hub
- # huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
- # if huggingface_token:
- #     login(token=huggingface_token)
- # else:
- #     print("Hugging Face token not found in environment variables.")
- # print(huggingface_token)
- # with tqdm(total=100, desc="Downloading model", bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}") as pbar:
- #     model_path = snapshot_download(
- #         repo_id="black-forest-labs/FLUX.1-dev",
- #         repo_type="model",
- #         ignore_patterns=["*.md", "*..gitattributes"],
- #         local_dir="FLUX.1-dev",
- #         token=huggingface_token)
-
- # # Load pipeline
- # print('controlnet enters')
- # with tqdm(total=100, desc="Downloading model", bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}") as pbar:
- #     controlnet = FluxControlNetModel.from_pretrained(
- #         "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
- #     ).to(device)
- # print('controlnet exits')
- # print('pipe enters')
- # with tqdm(total=100, desc="Downloading model", bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}") as pbar:
- #     pipe = FluxControlNetPipeline.from_pretrained(
- #         model_path, controlnet=controlnet, torch_dtype=torch.bfloat16
- #     ).to(device)
- # # pipe.to(device)
- # print('pipe exits')
-
- # MAX_SEED = 1000000
- # MAX_PIXEL_BUDGET = 1024 * 1024
-
- # def process_input(input_image, upscale_factor):
- #     w, h = input_image.size
- #     aspect_ratio = w / h
- #     was_resized = False
-
- #     # Resize if input size exceeds the maximum pixel budget
- #     if w * h * upscale_factor**2 > MAX_PIXEL_BUDGET:
- #         warnings.warn(f"Requested output image is too large. Resizing to fit within pixel budget.")
- #         input_image = input_image.resize(
- #             (
- #                 int(aspect_ratio * MAX_PIXEL_BUDGET**0.5 // upscale_factor),
- #                 int(MAX_PIXEL_BUDGET**0.5 // aspect_ratio // upscale_factor),
- #             )
- #         )
- #         was_resized = True
-
- #     # Adjust dimensions to be a multiple of 8
- #     w, h = input_image.size
- #     w = w - w % 8
- #     h = h - h % 8
-
- #     return input_image.resize((w, h)), was_resized
-
- # def run_inference(process_id, input_image, upscale_factor, seed, num_inference_steps, controlnet_conditioning_scale):
- #     input_image, was_resized = process_input(input_image, upscale_factor)
-
- #     # Rescale image for ControlNet processing
- #     w, h = input_image.size
- #     control_image = input_image.resize((w * upscale_factor, h * upscale_factor))
-
- #     # Set the random generator for inference
- #     generator = torch.Generator().manual_seed(seed)
-
- #     # Perform inference using the pipeline
- #     image = pipe(
- #         prompt="",
- #         control_image=control_image,
- #         controlnet_conditioning_scale=controlnet_conditioning_scale,
- #         num_inference_steps=num_inference_steps,
- #         guidance_scale=3.5,
- #         height=control_image.size[1],
- #         width=control_image.size[0],
- #         generator=generator,
- #     ).images[0]
-
- #     # Resize output image back to the original dimensions if needed
- #     if was_resized:
- #         original_size = (input_image.width * upscale_factor, input_image.height * upscale_factor)
- #         image = image.resize(original_size)
-
- #     # Convert the output image to base64
- #     buffered = io.BytesIO()
- #     image.save(buffered, format="JPEG")
- #     image_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
-
- #     # Store the result in the shared dictionary
- #     app.config['image_outputs'][process_id] = image_base64
-
- # @app.route('/infer', methods=['POST'])
- # def infer():
- #     data = request.json
- #     seed = data.get("seed", 42)
- #     randomize_seed = data.get("randomize_seed", True)
- #     num_inference_steps = data.get("num_inference_steps", 28)
- #     upscale_factor = data.get("upscale_factor", 4)
- #     controlnet_conditioning_scale = data.get("controlnet_conditioning_scale", 0.6)
-
- #     # Randomize seed if specified
- #     if randomize_seed:
- #         seed = random.randint(0, MAX_SEED)
-
- #     # Load and process the input image
- #     input_image_data = base64.b64decode(data['input_image'])
- #     input_image = Image.open(io.BytesIO(input_image_data))
-
- #     # Create a unique process ID for this request
- #     process_id = str(random.randint(1000, 9999))
-
- #     # Set the status to 'in_progress'
- #     app.config['image_outputs'][process_id] = None
-
- #     # Run the inference in a separate thread
- #     executor.submit(run_inference, process_id, input_image, upscale_factor, seed, num_inference_steps, controlnet_conditioning_scale)
-
- #     # Return the process ID
- #     return jsonify({
- #         "process_id": process_id,
- #         "message": "Processing started"
- #     })
-
- # # Modify status endpoint to receive process_id in request body
- # @app.route('/status', methods=['POST'])
- # def status():
- #     data = request.json
- #     process_id = data.get('process_id')
-
- #     # Check if process_id was provided
- #     if not process_id:
- #         return jsonify({
- #             "status": "error",
- #             "message": "Process ID is required"
- #         }), 400
-
- #     # Check if the process_id exists in the dictionary
- #     if process_id not in app.config['image_outputs']:
- #         return jsonify({
- #             "status": "error",
- #             "message": "Invalid process ID"
- #         }), 404
-
- #     # Check the status of the image processing
- #     image_base64 = app.config['image_outputs'][process_id]
- #     if image_base64 is None:
- #         return jsonify({
- #             "status": "in_progress"
- #         })
- #     else:
- #         return jsonify({
- #             "status": "completed",
- #             "output_image": image_base64
- #         })
-
- # if __name__ == '__main__':
- #     app.run(debug=True)
-
-
-
-
-
  import logging
  import random
  import warnings
@@ -535,8 +69,8 @@ else:
  logger.info("Hugging Face token: %s", huggingface_token)

  # Download model using snapshot_download
- with tqdm(total=100, desc="Downloading model", bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}") as pbar:
-     model_path = snapshot_download(
+ #with tqdm(total=100, desc="Downloading model", bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}") as pbar:
+ model_path = snapshot_download(
      repo_id="black-forest-labs/FLUX.1-dev",
      repo_type="model",
      ignore_patterns=["*.md", "*..gitattributes"],
@@ -546,15 +80,15 @@ with tqdm(total=100, desc="Downloading model", bar_format="{l_bar}{bar}| {n_fmt}

  # Load pipeline
  logger.info('Loading ControlNet model.')
- with tqdm(total=100, desc="Downloading ControlNet model", bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}") as pbar:
-     controlnet = FluxControlNetModel.from_pretrained(
+ #with tqdm(total=100, desc="Downloading ControlNet model", bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}") as pbar:
+ controlnet = FluxControlNetModel.from_pretrained(
      "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
  ).to(device)
  logger.info("ControlNet model loaded successfully.")

  logger.info('Loading pipeline.')
- with tqdm(total=100, desc="Downloading pipeline", bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}") as pbar:
-     pipe = FluxControlNetPipeline.from_pretrained(
+ #with tqdm(total=100, desc="Downloading pipeline", bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}") as pbar:
+ pipe = FluxControlNetPipeline.from_pretrained(
      model_path, controlnet=controlnet, torch_dtype=torch.bfloat16
  ).to(device)
  logger.info("Pipeline loaded successfully.")