SkalskiP committed
Commit bf673e1 · 1 Parent(s): fe679da
Files changed (1)
  1. app.py +26 -20
app.py CHANGED
@@ -23,13 +23,18 @@ for taking it to the next level by enabling inpainting with the FLUX.
 
 MAX_SEED = np.iinfo(np.int32).max
 IMAGE_SIZE = 1024
+# DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 DEVICE = torch.device("cpu")
 
+# torch.autocast(device_type="cuda", dtype=torch.bfloat16).__enter__()
+# if torch.cuda.get_device_properties(0).major >= 8:
+#     torch.backends.cuda.matmul.allow_tf32 = True
+#     torch.backends.cudnn.allow_tf32 = True
+
 FLORENCE_MODEL, FLORENCE_PROCESSOR = load_florence_model(device=DEVICE)
 SAM_IMAGE_MODEL = load_sam_image_model(device=DEVICE)
-FLUX_INPAINTING_PIPELINE = FluxInpaintPipeline.from_pretrained(
-    "black-forest-labs/FLUX.1-schnell",
-    torch_dtype=torch.bfloat16).to(torch.device("cuda"))
+# FLUX_INPAINTING_PIPELINE = FluxInpaintPipeline.from_pretrained(
+#     "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16).to(DEVICE)
 
 
 def resize_image_dimensions(
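This hunk makes start-up CPU-only: the FLUX inpainting pipeline is no longer loaded at import time, and the CUDA autocast/TF32 setup is kept only as comments. A minimal sketch of how that commented-out setup could instead be guarded at runtime, assuming the same names as app.py (DEVICE, FLUX_INPAINTING_PIPELINE); the availability checks are an assumption, not part of this commit:

```python
import torch
from diffusers import FluxInpaintPipeline

# Assumption: enable the CUDA-specific settings only when a GPU is present.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

if DEVICE.type == "cuda" and torch.cuda.get_device_properties(0).major >= 8:
    # TF32 matmuls are supported on Ampere (compute capability 8.x) and newer.
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.allow_tf32 = True

# Assumption: skip loading the heavy pipeline entirely on CPU-only deployments.
FLUX_INPAINTING_PIPELINE = None
if DEVICE.type == "cuda":
    FLUX_INPAINTING_PIPELINE = FluxInpaintPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
    ).to(DEVICE)
```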
@@ -60,6 +65,7 @@ def is_image_empty(image: Image.Image) -> bool:
 
 @spaces.GPU()
 @torch.inference_mode()
+# @torch.autocast(device_type="cuda", dtype=torch.bfloat16)
 def process(
     input_image_editor: dict,
     inpainting_prompt_text: str,
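The only change here is a commented-out autocast decorator on process(). torch.autocast instances can be used as decorators, so re-enabling the line would run the whole function body under bfloat16 autocast on CUDA. A minimal stand-in sketch; the function name and body below are placeholders, not from app.py:

```python
import torch

@torch.inference_mode()
@torch.autocast(device_type="cuda", dtype=torch.bfloat16)
def run_step(x: torch.Tensor) -> torch.Tensor:
    # Placeholder body: matmuls inside run under bfloat16 autocast on CUDA.
    return x @ x.T
```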
@@ -122,23 +128,23 @@ def process(
     mask = mask.resize((width, height), Image.LANCZOS)
     mask = mask.filter(ImageFilter.GaussianBlur(radius=10))
 
-    # return image, mask
-
-    if randomize_seed_checkbox:
-        seed_slicer = random.randint(0, MAX_SEED)
-    generator = torch.Generator().manual_seed(seed_slicer)
-    result = FLUX_INPAINTING_PIPELINE(
-        prompt=inpainting_prompt_text,
-        image=image,
-        mask_image=mask,
-        width=width,
-        height=height,
-        strength=strength_slider,
-        generator=generator,
-        num_inference_steps=num_inference_steps_slider
-    ).images[0]
-    print('INFERENCE DONE')
-    return result, mask
+    return image, mask
+
+    # if randomize_seed_checkbox:
+    #     seed_slicer = random.randint(0, MAX_SEED)
+    # generator = torch.Generator().manual_seed(seed_slicer)
+    # result = FLUX_INPAINTING_PIPELINE(
+    #     prompt=inpainting_prompt_text,
+    #     image=image,
+    #     mask_image=mask,
+    #     width=width,
+    #     height=height,
+    #     strength=strength_slider,
+    #     generator=generator,
+    #     num_inference_steps=num_inference_steps_slider
+    # ).images[0]
+    # print('INFERENCE DONE')
+    # return result, mask
 
 
 with gr.Blocks() as demo:
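With this hunk, process() returns the segmented image and mask as soon as they are ready, and the FLUX inpainting call survives only as comments. A sketch of how that branch could be restored behind a check that the pipeline was actually loaded; the helper name run_inpainting and the None-guard are assumptions, while the pipeline arguments are exactly the ones commented out above:

```python
import random

import numpy as np
import torch

MAX_SEED = np.iinfo(np.int32).max
FLUX_INPAINTING_PIPELINE = None  # set by the (commented-out) from_pretrained call in app.py


def run_inpainting(image, mask, inpainting_prompt_text, seed_slicer,
                   randomize_seed_checkbox, strength_slider,
                   num_inference_steps_slider, width, height):
    # Hypothetical helper mirroring the tail of process() before this commit.
    if FLUX_INPAINTING_PIPELINE is None:
        # CPU-only deployment: keep the early return introduced by this commit.
        return image, mask
    if randomize_seed_checkbox:
        seed_slicer = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed_slicer)
    result = FLUX_INPAINTING_PIPELINE(
        prompt=inpainting_prompt_text,
        image=image,
        mask_image=mask,
        width=width,
        height=height,
        strength=strength_slider,
        generator=generator,
        num_inference_steps=num_inference_steps_slider,
    ).images[0]
    return result, mask
```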