mrbeliever committed
Commit 75f921d
1 Parent(s): 0aee8d1

Update app.py

Files changed (1)
  1. app.py +48 -104
app.py CHANGED
@@ -2,7 +2,6 @@ import spaces
 import gradio as gr
 import re
 from PIL import Image
-
 import os
 import numpy as np
 import torch
@@ -13,24 +12,16 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 
 pipe = FluxImg2ImgPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16).to(device)
 
-
-
 def sanitize_prompt(prompt):
-    # Allow only alphanumeric characters, spaces, and basic punctuation
-    allowed_chars = re.compile(r"[^a-zA-Z0-9\s.,!?-]")
-    sanitized_prompt = allowed_chars.sub("", prompt)
-    return sanitized_prompt
+    allowed_chars = re.compile(r"[^a-zA-Z0-9\s.,!?-]")
+    sanitized_prompt = allowed_chars.sub("", prompt)
+    return sanitized_prompt
 
-def convert_to_fit_size(original_width_and_height, maximum_size = 2048):
-    width, height =original_width_and_height
+def convert_to_fit_size(original_width_and_height, maximum_size=2048):
+    width, height = original_width_and_height
     if width <= maximum_size and height <= maximum_size:
-        return width,height
-
-    if width > height:
-        scaling_factor = maximum_size / width
-    else:
-        scaling_factor = maximum_size / height
-
+        return width, height
+    scaling_factor = maximum_size / max(width, height)
     new_width = int(width * scaling_factor)
     new_height = int(height * scaling_factor)
     return new_width, new_height
@@ -40,78 +31,45 @@ def adjust_to_multiple_of_32(width: int, height: int):
     height = height - (height % 32)
     return width, height
 
-
-
-
 @spaces.GPU(duration=120)
-def process_images(image,prompt="a girl",strength=0.75,seed=0,inference_step=4,progress=gr.Progress(track_tqdm=True)):
-    #print("start process_images")
+def process_images(image, prompt="a girl", strength=0.75, seed=0, inference_step=4, progress=gr.Progress(track_tqdm=True)):
     progress(0, desc="Starting")
-
-
-    def process_img2img(image,prompt="a person",strength=0.75,seed=0,num_inference_steps=4):
-        #print("start process_img2img")
-        if image == None:
-            print("empty input image returned")
+    def process_img2img(image, prompt="a person", strength=0.75, seed=0, num_inference_steps=4):
+        if image is None:
            return None
-
-        generators = []
        generator = torch.Generator(device).manual_seed(seed)
-        generators.append(generator)
-        width,height = convert_to_fit_size(image.size)
-        #print(f"fit {width}x{height}")
-        width,height = adjust_to_multiple_of_32(width,height)
-        #print(f"multiple {width}x{height}")
+        width, height = convert_to_fit_size(image.size)
+        width, height = adjust_to_multiple_of_32(width, height)
        image = image.resize((width, height), Image.LANCZOS)
-        #mask_image = mask_image.resize((width, height), Image.NEAREST)
-
-        # more parameter see https://huggingface.co/docs/diffusers/api/pipelines/flux#diffusers.FluxInpaintPipeline
-        #print(prompt)
-        output = pipe(prompt=prompt, image=image,generator=generator,strength=strength,width=width,height=height
-                ,guidance_scale=0,num_inference_steps=num_inference_steps,max_sequence_length=256)
-
-        # TODO support mask
+        output = pipe(prompt=prompt, image=image, generator=generator, strength=strength, width=width, height=height, guidance_scale=0, num_inference_steps=num_inference_steps, max_sequence_length=256)
        return output.images[0]
-
-    output = process_img2img(image,prompt,strength,seed,inference_step)
-
-    #print("end process_images")
+    output = process_img2img(image, prompt, strength, seed, inference_step)
    return output
-
 
 def read_file(path: str) -> str:
     with open(path, 'r', encoding='utf-8') as f:
         content = f.read()
-
     return content
 
-
-css="""
-#col-left {
-    margin: 0 auto;
-    max-width: 640px;
-}
-#col-right {
+css = """
+#col-left, #col-right {
     margin: 0 auto;
     max-width: 640px;
 }
 .grid-container {
-    display: flex;
-    align-items: center;
-    justify-content: center;
-    gap:10px
+    display: flex;
+    align-items: center;
+    justify-content: center;
+    gap: 10px;
 }
-
 .image {
-    width: 128px;
-    height: 128px;
-    object-fit: cover;
+    width: 256px;
+    height: 256px;
+    object-fit: cover;
 }
-
 .text {
-    font-size: 16px;
+    font-size: 16px;
 }
-
 """
 
 with gr.Blocks(css=css, elem_id="demo-container") as demo:
@@ -119,49 +77,35 @@ with gr.Blocks(css=css, elem_id="demo-container") as demo:
     gr.HTML(read_file("demo_header.html"))
     gr.HTML(read_file("demo_tools.html"))
     with gr.Row():
-        with gr.Column():
-            image = gr.Image(height=800,sources=['upload','clipboard'],image_mode='RGB', elem_id="image_upload", type="pil", label="Upload")
-            with gr.Row(elem_id="prompt-container", equal_height=False):
-                with gr.Row():
-                    prompt = gr.Textbox(label="Prompt",value="a women",placeholder="Your prompt (what you want in place of what is erased)", elem_id="prompt")
-
-            btn = gr.Button("Img2Img", elem_id="run_button",variant="primary")
-
-            with gr.Accordion(label="Advanced Settings", open=False):
-                with gr.Row( equal_height=True):
-                    strength = gr.Number(value=0.75, minimum=0, maximum=0.75, step=0.01, label="strength")
-                    seed = gr.Number(value=100, minimum=0, step=1, label="seed")
-                    inference_step = gr.Number(value=4, minimum=1, step=4, label="inference_step")
-            id_input=gr.Text(label="Name", visible=False)
-
-        with gr.Column():
-            image_out = gr.Image(height=800,sources=[],label="Output", elem_id="output-img",format="jpg")
-
-
-
-
+        with gr.Column():
+            image = gr.Image(width=256, height=256, sources=['upload', 'clipboard'], image_mode='RGB', elem_id="image_upload", type="pil", label="Upload")
+            prompt = gr.Textbox(label="Prompt", value="a woman", placeholder="Your prompt", elem_id="prompt")
+            btn = gr.Button("Generate", elem_id="run_button", variant="primary")
+            with gr.Accordion(label="Advanced Settings", open=False):
+                strength = gr.Number(value=0.75, minimum=0, maximum=0.75, step=0.01, label="Strength")
+                seed = gr.Number(value=100, minimum=0, step=1, label="Seed")
+                inference_step = gr.Number(value=4, minimum=1, step=4, label="Inference Steps")
+        with gr.Column():
+            image_out = gr.Image(width=256, height=256, label="Output", elem_id="output-img", format="jpg")
 
     gr.Examples(
-        examples=[
-            ["examples/draw_input.jpg", "examples/draw_output.jpg","a women ,eyes closed,mouth opened"],
-            ["examples/draw-gimp_input.jpg", "examples/draw-gimp_output.jpg","a women ,eyes closed,mouth opened"],
-            ["examples/gimp_input.jpg", "examples/gimp_output.jpg","a women ,hand on neck"],
-            ["examples/inpaint_input.jpg", "examples/inpaint_output.jpg","a women ,hand on neck"]
-        ]
-        ,
-        inputs=[image,image_out,prompt],
-    )
-    gr.HTML(
-        gr.HTML(read_file("demo_footer.html"))
+        examples=[
+            ["examples/draw_input.jpg", "examples/draw_output.jpg", "a woman, eyes closed, mouth open"],
+            ["examples/draw-gimp_input.jpg", "examples/draw-gimp_output.jpg", "a woman, eyes closed, mouth open"],
+            ["examples/gimp_input.jpg", "examples/gimp_output.jpg", "a woman, hand on neck"],
+            ["examples/inpaint_input.jpg", "examples/inpaint_output.jpg", "a woman, hand on neck"]
+        ],
+        inputs=[image, image_out, prompt],
     )
+
+    gr.HTML(gr.HTML(read_file("demo_footer.html")))
+
     gr.on(
         triggers=[btn.click, prompt.submit],
-        fn = process_images,
-        inputs = [image,prompt,strength,seed,inference_step],
-        outputs = [image_out]
+        fn=process_images,
+        inputs=[image, prompt, strength, seed, inference_step],
+        outputs=[image_out]
     )
 
 if __name__ == "__main__":
-    demo.launch()
-
-
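Note on the sanitize_prompt whitelist this commit keeps: the regex strips every character outside a-z, A-Z, 0-9, whitespace, and basic punctuation (.,!?-). A minimal sketch of its effect; the input string is a hypothetical example:

    import re

    allowed_chars = re.compile(r"[^a-zA-Z0-9\s.,!?-]")
    print(allowed_chars.sub("", "a woman <script>"))  # prints: a woman script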
14
 
 
 
15
  def sanitize_prompt(prompt):
16
+ allowed_chars = re.compile(r"[^a-zA-Z0-9\s.,!?-]")
17
+ sanitized_prompt = allowed_chars.sub("", prompt)
18
+ return sanitized_prompt
 
19
 
20
+ def convert_to_fit_size(original_width_and_height, maximum_size=2048):
21
+ width, height = original_width_and_height
22
  if width <= maximum_size and height <= maximum_size:
23
+ return width, height
24
+ scaling_factor = maximum_size / max(width, height)
 
 
 
 
 
25
  new_width = int(width * scaling_factor)
26
  new_height = int(height * scaling_factor)
27
  return new_width, new_height
 
31
  height = height - (height % 32)
32
  return width, height
33
 
 
 
 
34
  @spaces.GPU(duration=120)
35
+ def process_images(image, prompt="a girl", strength=0.75, seed=0, inference_step=4, progress=gr.Progress(track_tqdm=True)):
 
36
  progress(0, desc="Starting")
37
+ def process_img2img(image, prompt="a person", strength=0.75, seed=0, num_inference_steps=4):
38
+ if image is None:
 
 
 
 
39
  return None
 
 
40
  generator = torch.Generator(device).manual_seed(seed)
41
+ width, height = convert_to_fit_size(image.size)
42
+ width, height = adjust_to_multiple_of_32(width, height)
 
 
 
43
  image = image.resize((width, height), Image.LANCZOS)
44
+ output = pipe(prompt=prompt, image=image, generator=generator, strength=strength, width=width, height=height, guidance_scale=0, num_inference_steps=num_inference_steps, max_sequence_length=256)
 
 
 
 
 
 
 
45
  return output.images[0]
46
+ output = process_img2img(image, prompt, strength, seed, inference_step)
 
 
 
47
  return output
 
48
 
49
  def read_file(path: str) -> str:
50
  with open(path, 'r', encoding='utf-8') as f:
51
  content = f.read()
 
52
  return content
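The main logic change in convert_to_fit_size replaces the width/height branch with a single scaling factor derived from max(width, height). A standalone sketch of the resulting resize path, matching the committed helpers; the sample size (1024, 1000) is a hypothetical input:

    def convert_to_fit_size(original_width_and_height, maximum_size=2048):
        # Scale the longer side down to maximum_size, preserving aspect ratio.
        width, height = original_width_and_height
        if width <= maximum_size and height <= maximum_size:
            return width, height
        scaling_factor = maximum_size / max(width, height)
        return int(width * scaling_factor), int(height * scaling_factor)

    def adjust_to_multiple_of_32(width: int, height: int):
        # Floor both dimensions to the nearest multiple of 32.
        return width - (width % 32), height - (height % 32)

    print(adjust_to_multiple_of_32(*convert_to_fit_size((1024, 1000))))  # (1024, 992)

Both helpers run before the PIL resize, so the pipeline always receives dimensions divisible by 32.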
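For reference, a minimal standalone version of the seeded img2img call that process_img2img now reduces to. This is a sketch, assuming a diffusers release that ships FluxImg2ImgPipeline; input.jpg and output.jpg are hypothetical paths, and the parameter values mirror the app's defaults:

    import torch
    from PIL import Image
    from diffusers import FluxImg2ImgPipeline

    device = "cuda" if torch.cuda.is_available() else "cpu"
    pipe = FluxImg2ImgPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
    ).to(device)

    init_image = Image.open("input.jpg").convert("RGB")   # hypothetical input
    generator = torch.Generator(device).manual_seed(100)  # seed matches the UI default
    result = pipe(
        prompt="a woman",
        image=init_image,
        generator=generator,
        strength=0.75,            # the UI caps strength at 0.75
        width=init_image.width,   # the app snaps these to multiples of 32 first
        height=init_image.height,
        guidance_scale=0,         # the app passes 0 for the schnell checkpoint
        num_inference_steps=4,
        max_sequence_length=256,
    ).images[0]
    result.save("output.jpg")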