kevinantony committed
Commit 02cc4a8 · verified · 1 Parent(s): 7c6f645

Update app.py

Files changed (1):
  1. app.py +69 -67
app.py CHANGED
@@ -9,13 +9,15 @@ import torch
 from PIL import Image
 from diffusers import FluxInpaintPipeline
 
+torch.cuda.empty_cache()
+
 MARKDOWN = """
-# FLUX.1 Inpainting using
-[Black Forest Labs](https://huggingface.co/black-forest-labs)' "FLUX.1-dev"
+# FLUX Inpainting
+Model used: FLUX.1-schnell.
 """
 
 MAX_SEED = np.iinfo(np.int32).max
-IMAGE_SIZE = 1024
+IMAGE_SIZE = 512
 DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 
 
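Halving `IMAGE_SIZE` from 1024 to 512 quarters the pixel area the pipeline works on, which is the largest VRAM saving in this commit. The resize helper that consumes the constant is not shown in this diff; below is a minimal sketch of how such a cap is typically applied, with a hypothetical `resize_image_dimensions` (the name and the snap-to-multiple-of-8 rule are assumptions, not code from this repo):

```python
from PIL import Image

IMAGE_SIZE = 512  # cap from this commit (was 1024)

def resize_image_dimensions(image: Image.Image, max_size: int = IMAGE_SIZE) -> tuple[int, int]:
    # Hypothetical helper: scale the longer side down to `max_size`
    # without upscaling, then snap both sides to multiples of 8,
    # which latent-diffusion pipelines generally require.
    scale = min(max_size / max(image.width, image.height), 1.0)
    width = max(int(image.width * scale) // 8 * 8, 8)
    height = max(int(image.height * scale) // 8 * 8, 8)
    return width, height
```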
@@ -34,32 +36,32 @@ def remove_background(image: Image.Image, threshold: int = 50) -> Image.Image:
     return image
 
 
-EXAMPLES = [
-    [
-        {
-            "background": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-image.png", stream=True).raw),
-            "layers": [remove_background(Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-mask-2.png", stream=True).raw))],
-            "composite": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-composite-2.png", stream=True).raw),
-        },
-        "little lion",
-        42,
-        False,
-        0.85,
-        30
-    ],
-    [
-        {
-            "background": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-image.png", stream=True).raw),
-            "layers": [remove_background(Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-mask-3.png", stream=True).raw))],
-            "composite": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-composite-3.png", stream=True).raw),
-        },
-        "tribal tattoos",
-        42,
-        False,
-        0.85,
-        30
-    ]
-]
+# EXAMPLES = [
+#     [
+#         {
+#             "background": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-image.png", stream=True).raw),
+#             "layers": [remove_background(Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-mask-2.png", stream=True).raw))],
+#             "composite": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-composite-2.png", stream=True).raw),
+#         },
+#         "little lion",
+#         42,
+#         False,
+#         0.85,
+#         30
+#     ],
+#     [
+#         {
+#             "background": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-image.png", stream=True).raw),
+#             "layers": [remove_background(Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-mask-3.png", stream=True).raw))],
+#             "composite": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-composite-3.png", stream=True).raw),
+#         },
+#         "tribal tattoos",
+#         42,
+#         False,
+#         0.85,
+#         30
+#     ]
+# ]
 
 pipe = FluxInpaintPipeline.from_pretrained(
     "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16).to(DEVICE)
@@ -122,17 +124,18 @@ def process(
     if randomize_seed_checkbox:
         seed_slicer = random.randint(0, MAX_SEED)
     generator = torch.Generator().manual_seed(seed_slicer)
-    result = pipe(
-        prompt=input_text,
-        image=resized_image,
-        mask_image=resized_mask,
-        width=width,
-        height=height,
-        strength=strength_slider,
-        generator=generator,
-        num_inference_steps=num_inference_steps_slider
-    ).images[0]
-    print('INFERENCE DONE')
+    with torch.no_grad(), torch.autocast("cuda"):
+        result = pipe(
+            prompt=input_text,
+            image=resized_image,
+            mask_image=resized_mask,
+            width=width,
+            height=height,
+            strength=strength_slider,
+            generator=generator,
+            num_inference_steps=num_inference_steps_slider
+        ).images[0]
+    torch.cuda.empty_cache()
     return result, resized_mask
 
 
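The inference call now runs under `torch.no_grad()` (no autograd bookkeeping) and `torch.autocast("cuda")` (mixed-precision kernels for eligible ops), with a cache flush once the image is out. The same pattern as a self-contained sketch, with the pipeline call abstracted behind `**kwargs`:

```python
import torch

def run_pipeline(pipe, **kwargs):
    # Skip gradient tracking and let autocast pick reduced-precision
    # kernels where safe: both reduce peak VRAM in the denoising loop.
    with torch.no_grad(), torch.autocast("cuda"):
        image = pipe(**kwargs).images[0]
    # Release cached, unused allocator memory between requests.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    return image
```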
@@ -174,9 +177,9 @@ with gr.Blocks() as demo:
         with gr.Row():
             strength_slider_component = gr.Slider(
                 label="Strength",
-                info="Indicates extent to transform the reference `image`. "
-                "Must be between 0 and 1. `image` is used as a starting "
-                "point and more noise is added the higher the `strength`.",
+                # info="Indicates extent to transform the reference `image`. "
+                # "Must be between 0 and 1. `image` is used as a starting "
+                # "point and more noise is added the higher the `strength`.",
                 minimum=0,
                 maximum=1,
                 step=0.01,
@@ -185,10 +188,10 @@ with gr.Blocks() as demo:
 
             num_inference_steps_slider_component = gr.Slider(
                 label="Number of inference steps",
-                info="The number of denoising steps. More denoising steps "
-                "usually lead to a higher quality image at the",
+                # info="The number of denoising steps. More denoising steps "
+                # "usually lead to a higher quality image at the",
                 minimum=1,
-                maximum=50,
+                maximum=20,
                 step=1,
                 value=20,
             )
@@ -198,25 +201,25 @@ with gr.Blocks() as demo:
         with gr.Accordion("Debug", open=False):
             output_mask_component = gr.Image(
                 type='pil', image_mode='RGB', label='Input mask', format="png")
-    with gr.Row():
-        gr.Examples(
-            fn=process,
-            examples=EXAMPLES,
-            inputs=[
-                input_image_editor_component,
-                input_text_component,
-                seed_slicer_component,
-                randomize_seed_checkbox_component,
-                strength_slider_component,
-                num_inference_steps_slider_component
-            ],
-            outputs=[
-                output_image_component,
-                output_mask_component
-            ],
-            run_on_click=True,
-            cache_examples=True
-        )
+    # with gr.Row():
+    #     gr.Examples(
+    #         fn=process,
+    #         examples=EXAMPLES,
+    #         inputs=[
+    #             input_image_editor_component,
+    #             input_text_component,
+    #             seed_slicer_component,
+    #             randomize_seed_checkbox_component,
+    #             strength_slider_component,
+    #             num_inference_steps_slider_component
+    #         ],
+    #         outputs=[
+    #             output_image_component,
+    #             output_mask_component
+    #         ],
+    #         run_on_click=True,
+    #         cache_examples=True
+    #     )
 
     submit_button_component.click(
         fn=process,
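With `cache_examples=True`, Gradio runs `fn` on every example when the app starts, so keeping this block would have cost two full FLUX inferences (plus the downloads above) before the first request, which is a likely reason it was disabled. If the examples return, a lighter variant keeps them uncached so clicking an example runs `process` on demand; a sketch reusing the component names from this diff:

```python
# Sketch only: assumes it is placed back inside the gr.Blocks() context.
gr.Examples(
    fn=process,
    examples=EXAMPLES,
    inputs=[
        input_image_editor_component,
        input_text_component,
        seed_slicer_component,
        randomize_seed_checkbox_component,
        strength_slider_component,
        num_inference_steps_slider_component,
    ],
    outputs=[output_image_component, output_mask_component],
    run_on_click=True,
    cache_examples=False,  # do not precompute results at startup
)
```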
@@ -234,5 +237,4 @@ with gr.Blocks() as demo:
         ]
     )
 
-demo.launch(debug=False, show_error=True)
-
+demo.launch(debug=False, show_error=True, share=True)
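On the new launch line, `share=True` asks Gradio to open a temporary public tunnel (a `*.gradio.live` URL) alongside the local server. On Hugging Face Spaces the app is already served publicly and Gradio warns and ignores the flag there, so it only matters when running locally. A small environment-aware sketch (the `SPACE_ID` check is a common convention for detecting the Spaces runtime, not code from this repo):

```python
import os

# SPACE_ID is set in the Hugging Face Spaces runtime; only open a
# tunnel when running outside of Spaces.
on_spaces = os.getenv("SPACE_ID") is not None
demo.launch(debug=False, show_error=True, share=not on_spaces)
```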