Spaces:
Running
Running
hatmanstack
committed on
Commit
·
337cf72
1
Parent(s):
8a17e3a
reworked inpaint/mask, color guide, explanations
Browse files- app.py +64 -51
- functions.py +112 -82
- generate.py +1 -1
app.py
CHANGED
@@ -13,6 +13,11 @@ class Config:
|
|
13 |
|
14 |
config = Config()
|
15 |
|
|
|
|
|
|
|
|
|
|
|
16 |
def create_advanced_options():
|
17 |
|
18 |
negative_text = gr.Textbox(label="Negative Prompt", placeholder="Describe what not to include (1-1024 characters)", max_lines=1)
|
@@ -40,6 +45,7 @@ with gr.Blocks() as demo:
|
|
40 |
justify-content: center !important;
|
41 |
width: 100% !important;
|
42 |
}
|
|
|
43 |
</style>
|
44 |
""")
|
45 |
gr.Markdown("<h1>Amazon Nova Canvas Image Generation</h1>", elem_classes="center-markdown" )
|
@@ -47,10 +53,8 @@ with gr.Blocks() as demo:
|
|
47 |
with gr.Tab("Text to Image"):
|
48 |
with gr.Column():
|
49 |
gr.Markdown("""
|
50 |
-
|
51 |
-
|
52 |
-
</div>
|
53 |
-
""")
|
54 |
prompt = gr.Textbox(label="Prompt", placeholder="Enter a text prompt (1-1024 characters)", max_lines=4)
|
55 |
gr.Button("Generate Prompt").click(generate_nova_prompt, outputs=prompt)
|
56 |
error_box = gr.Markdown(visible=False, label="Error", elem_classes="center-markdown")
|
@@ -62,58 +66,69 @@ with gr.Blocks() as demo:
|
|
62 |
with gr.Tab("Inpainting"):
|
63 |
with gr.Column():
|
64 |
gr.Markdown("""
|
65 |
-
|
66 |
-
Modify specific areas of your image using inpainting. Upload your
|
67 |
-
You can use
|
68 |
-
|
69 |
-
|
70 |
-
""")
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
78 |
error_box = gr.Markdown(visible=False, label="Error", elem_classes="center-markdown")
|
79 |
output = gr.Image()
|
80 |
with gr.Accordion("Advanced Options", open=False):
|
81 |
negative_text, width, height, quality, cfg_scale, seed = create_advanced_options()
|
82 |
|
83 |
-
gr.Button("Generate").click(inpainting, inputs=[
|
84 |
|
85 |
with gr.Tab("Outpainting"):
|
86 |
with gr.Column():
|
87 |
gr.Markdown("""
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
102 |
error_box = gr.Markdown(visible=False, label="Error", elem_classes="center-markdown")
|
103 |
output = gr.Image()
|
104 |
with gr.Accordion("Advanced Options", open=False):
|
105 |
outpainting_mode = gr.Radio(choices=["DEFAULT", "PRECISE"], value="DEFAULT", label="Outpainting Mode")
|
106 |
negative_text, width, height, quality, cfg_scale, seed = create_advanced_options()
|
107 |
|
108 |
-
gr.Button("Generate").click(outpainting, inputs=[
|
109 |
|
110 |
with gr.Tab("Image Variation"):
|
111 |
with gr.Column():
|
112 |
gr.Markdown("""
|
113 |
-
|
114 |
-
|
115 |
-
</div>
|
116 |
-
""")
|
117 |
images = gr.File(type='filepath', label="Input Images", file_count="multiple", file_types=["image"])
|
118 |
with gr.Accordion("Optional Prompt", open=False):
|
119 |
prompt = gr.Textbox(label="Prompt", placeholder="Enter a text prompt (1-1024 characters)", max_lines=4)
|
@@ -129,12 +144,10 @@ with gr.Blocks() as demo:
|
|
129 |
with gr.Tab("Image Conditioning"):
|
130 |
with gr.Column():
|
131 |
gr.Markdown("""
|
132 |
-
<div style="text-align: center;">
|
133 |
Generate an image conditioned by an input image. You need to add a text prompt to direct the model (required).
|
134 |
You have two modes to control the conditioning,"CANNY" and "SEGMENTATION". CANNY will follow the edges of the conditioning image closely.
|
135 |
SEGMENTATION will follow the layout or shapes of the conditioning image.
|
136 |
-
|
137 |
-
""")
|
138 |
condition_image = gr.Image(type='pil', label="Condition Image")
|
139 |
prompt = gr.Textbox(label="Prompt", placeholder="Enter a text prompt (1-1024 characters)", max_lines=4)
|
140 |
gr.Button("Generate Prompt").click(generate_nova_prompt, outputs=prompt)
|
@@ -149,14 +162,16 @@ with gr.Blocks() as demo:
|
|
149 |
with gr.Tab("Color Guided"):
|
150 |
with gr.Column():
|
151 |
gr.Markdown("""
|
152 |
-
|
153 |
-
|
154 |
-
|
155 |
-
|
156 |
-
|
157 |
-
|
158 |
-
|
159 |
-
|
|
|
|
|
160 |
prompt = gr.Textbox(label="Text", placeholder="Enter a text prompt (1-1024 characters)", max_lines=4)
|
161 |
gr.Button("Generate Prompt").click(generate_nova_prompt, outputs=prompt)
|
162 |
with gr.Accordion("Optional Reference Image", open=False):
|
@@ -170,10 +185,8 @@ with gr.Blocks() as demo:
|
|
170 |
with gr.Tab("Background Removal"):
|
171 |
with gr.Column():
|
172 |
gr.Markdown("""
|
173 |
-
<div style="text-align: center;">
|
174 |
Remove the background from an image.
|
175 |
-
|
176 |
-
""")
|
177 |
image = gr.Image(type='pil', label="Input Image")
|
178 |
error_box = gr.Markdown(visible=False, label="Error", elem_classes="center-markdown")
|
179 |
output = gr.Image()
|
|
|
13 |
|
14 |
config = Config()
|
15 |
|
16 |
+
def update_mask_editor(img):
|
17 |
+
if img['background'] is None:
|
18 |
+
return None
|
19 |
+
return create_padded_image(img)
|
20 |
+
|
21 |
def create_advanced_options():
|
22 |
|
23 |
negative_text = gr.Textbox(label="Negative Prompt", placeholder="Describe what not to include (1-1024 characters)", max_lines=1)
|
|
|
45 |
justify-content: center !important;
|
46 |
width: 100% !important;
|
47 |
}
|
48 |
+
|
49 |
</style>
|
50 |
""")
|
51 |
gr.Markdown("<h1>Amazon Nova Canvas Image Generation</h1>", elem_classes="center-markdown" )
|
|
|
53 |
with gr.Tab("Text to Image"):
|
54 |
with gr.Column():
|
55 |
gr.Markdown("""
|
56 |
+
Generate an image from a text prompt using the Amazon Nova Canvas model.
|
57 |
+
""", elem_classes="center-markdown")
|
|
|
|
|
58 |
prompt = gr.Textbox(label="Prompt", placeholder="Enter a text prompt (1-1024 characters)", max_lines=4)
|
59 |
gr.Button("Generate Prompt").click(generate_nova_prompt, outputs=prompt)
|
60 |
error_box = gr.Markdown(visible=False, label="Error", elem_classes="center-markdown")
|
|
|
66 |
with gr.Tab("Inpainting"):
|
67 |
with gr.Column():
|
68 |
gr.Markdown("""
|
69 |
+
|
70 |
+
Modify specific areas of your image using inpainting. Upload your base Image then choose one of two ways to specify the areas you want to edit:
|
71 |
+
You can use the in app editing tool to draw masks for areas to edit or use the Mask Prompt field to direct the model how to infer the mask. <b>ONLY
|
72 |
+
ONE</b> of these methods can be used at a time. Create an optional prompt to tell the model how to fill in the area you mask.
|
73 |
+
|
74 |
+
""", elem_classes="center-markdown")
|
75 |
+
mask_image = gr.ImageEditor(
|
76 |
+
type="pil",
|
77 |
+
height="100%",
|
78 |
+
width="100%",
|
79 |
+
crop_size="1:1",
|
80 |
+
brush={"color": "#000000", "radius": 25},
|
81 |
+
show_download_button=False,
|
82 |
+
show_share_button=False,
|
83 |
+
label="Draw mask (black areas will be edited)",
|
84 |
+
)
|
85 |
+
with gr.Accordion("Optional Mask Prompt", open=False):
|
86 |
+
mask_prompt = gr.Textbox(label="Mask Prompt", placeholder="Describe regions to edit", max_lines=1)
|
87 |
+
prompt = gr.Textbox(label="Optional Prompt", placeholder="Describe what to generate (1-1024 characters) in the masked area", max_lines=4)
|
88 |
+
gr.Button("Generate Prompt").click(generate_nova_prompt, outputs=prompt)
|
89 |
error_box = gr.Markdown(visible=False, label="Error", elem_classes="center-markdown")
|
90 |
output = gr.Image()
|
91 |
with gr.Accordion("Advanced Options", open=False):
|
92 |
negative_text, width, height, quality, cfg_scale, seed = create_advanced_options()
|
93 |
|
94 |
+
gr.Button("Generate").click(inpainting, inputs=[mask_image,mask_prompt, prompt, negative_text, height, width, quality, cfg_scale, seed], outputs=[output, error_box])
|
95 |
|
96 |
with gr.Tab("Outpainting"):
|
97 |
with gr.Column():
|
98 |
gr.Markdown("""
|
99 |
+
Modify areas outside of your image using outpainting. Give the image a transparent border by adding padding then draw
|
100 |
+
a mask on the image or border where you would like the model to generate new content. The other option is to allow the model to infer the mask from the Mask Prompt. In options, you can choose to precisley follow the mask or transition smoothly
|
101 |
+
between the masked area and the non-masked area. Create an optional prompt to tell the model how to fill in the area you mask.
|
102 |
+
""", elem_classes="center-markdown")
|
103 |
+
mask_image = gr.ImageEditor(
|
104 |
+
type="pil",
|
105 |
+
height="100%",
|
106 |
+
width="100%",
|
107 |
+
crop_size="1:1",
|
108 |
+
brush={"color": "#000000", "radius": 25},
|
109 |
+
show_download_button=False,
|
110 |
+
show_share_button=False,
|
111 |
+
label="Draw mask (black areas will be edited)"
|
112 |
+
)
|
113 |
+
gr.Button("Create Padding").click(fn=update_mask_editor, inputs=[mask_image], outputs=[mask_image])
|
114 |
+
|
115 |
+
with gr.Accordion("Optional Mask Prompt", open=False):
|
116 |
+
mask_prompt = gr.Textbox(label="Mask Prompt", placeholder="Describe regions to edit", max_lines=1)
|
117 |
+
prompt = gr.Textbox(label="Prompt", placeholder="Describe what to generate (1-1024 characters)", max_lines=4)
|
118 |
+
gr.Button("Generate Prompt").click(generate_nova_prompt, outputs=prompt)
|
119 |
error_box = gr.Markdown(visible=False, label="Error", elem_classes="center-markdown")
|
120 |
output = gr.Image()
|
121 |
with gr.Accordion("Advanced Options", open=False):
|
122 |
outpainting_mode = gr.Radio(choices=["DEFAULT", "PRECISE"], value="DEFAULT", label="Outpainting Mode")
|
123 |
negative_text, width, height, quality, cfg_scale, seed = create_advanced_options()
|
124 |
|
125 |
+
gr.Button("Generate").click(outpainting, inputs=[mask_image, mask_prompt, prompt, negative_text, outpainting_mode, height, width, quality, cfg_scale, seed], outputs=[output, error_box])
|
126 |
|
127 |
with gr.Tab("Image Variation"):
|
128 |
with gr.Column():
|
129 |
gr.Markdown("""
|
130 |
+
Create a variation image based on up to 5 other images and a Similarity slider available in options. You can add a prompt to direct the model (optional). Images should be .png or .jpg.
|
131 |
+
""", elem_classes="center-markdown")
|
|
|
|
|
132 |
images = gr.File(type='filepath', label="Input Images", file_count="multiple", file_types=["image"])
|
133 |
with gr.Accordion("Optional Prompt", open=False):
|
134 |
prompt = gr.Textbox(label="Prompt", placeholder="Enter a text prompt (1-1024 characters)", max_lines=4)
|
|
|
144 |
with gr.Tab("Image Conditioning"):
|
145 |
with gr.Column():
|
146 |
gr.Markdown("""
|
|
|
147 |
Generate an image conditioned by an input image. You need to add a text prompt to direct the model (required).
|
148 |
You have two modes to control the conditioning,"CANNY" and "SEGMENTATION". CANNY will follow the edges of the conditioning image closely.
|
149 |
SEGMENTATION will follow the layout or shapes of the conditioning image.
|
150 |
+
""", elem_classes="center-markdown")
|
|
|
151 |
condition_image = gr.Image(type='pil', label="Condition Image")
|
152 |
prompt = gr.Textbox(label="Prompt", placeholder="Enter a text prompt (1-1024 characters)", max_lines=4)
|
153 |
gr.Button("Generate Prompt").click(generate_nova_prompt, outputs=prompt)
|
|
|
162 |
with gr.Tab("Color Guided"):
|
163 |
with gr.Column():
|
164 |
gr.Markdown("""
|
165 |
+
Generate an image using a color palette. This mode requires a text prompt and a color list. If you choose to include an image, the subject and style will be used as a reference.
|
166 |
+
The colors of the image will also be incorporated, along with the colors from the colors list. A generic color list has been provided behind the scenes if one isn't added.
|
167 |
+
""", elem_classes="center-markdown")
|
168 |
+
with gr.Row():
|
169 |
+
with gr.Column(scale=75):
|
170 |
+
colors = gr.Textbox(label="Colors", placeholder="Enter up to 10 colors as hex values, e.g., #00FF00,#FCF2AB", max_lines=1)
|
171 |
+
with gr.Column(scale=25):
|
172 |
+
color_picker = gr.ColorPicker(label="Color Picker", show_label=False)
|
173 |
+
#add_color_button = gr.Button("Add Color") Work out Color Picker Collapsing and Rerendering
|
174 |
+
#add_color_button.click(fn=add_color_to_list, inputs=[colors, color_picker], outputs=colors)
|
175 |
prompt = gr.Textbox(label="Text", placeholder="Enter a text prompt (1-1024 characters)", max_lines=4)
|
176 |
gr.Button("Generate Prompt").click(generate_nova_prompt, outputs=prompt)
|
177 |
with gr.Accordion("Optional Reference Image", open=False):
|
|
|
185 |
with gr.Tab("Background Removal"):
|
186 |
with gr.Column():
|
187 |
gr.Markdown("""
|
|
|
188 |
Remove the background from an image.
|
189 |
+
""", elem_classes="center-markdown")
|
|
|
190 |
image = gr.Image(type='pil', label="Input Image")
|
191 |
error_box = gr.Markdown(visible=False, label="Error", elem_classes="center-markdown")
|
192 |
output = gr.Image()
|
functions.py
CHANGED
@@ -4,64 +4,88 @@ import random
|
|
4 |
import gradio as gr
|
5 |
from PIL import Image
|
6 |
from generate import *
|
|
|
7 |
from typing import Dict, Any
|
8 |
from processImage import process_and_encode_image
|
9 |
|
10 |
-
def
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
17 |
else:
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
if primary:
|
29 |
-
result["image"] = process_and_encode_image(primary)
|
30 |
-
if secondary:
|
31 |
-
result["maskImage"] = process_and_encode_image(secondary)
|
32 |
-
return result
|
33 |
-
|
34 |
-
def create_image_generation_config(height=1024, width=1024, quality="standard", cfg_scale=8.0, seed=0):
|
35 |
-
return {
|
36 |
-
"numberOfImages": 1,
|
37 |
-
"height": height,
|
38 |
-
"width": width,
|
39 |
-
"quality": quality,
|
40 |
-
"cfgScale": cfg_scale,
|
41 |
-
"seed": seed
|
42 |
-
}
|
43 |
|
44 |
def build_request(task_type, params, height=1024, width=1024, quality="standard", cfg_scale=8.0, seed=0):
|
45 |
-
param_dict = {
|
46 |
-
|
47 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
48 |
return json.dumps({
|
49 |
"taskType": task_type,
|
50 |
param_dict[task_type]: params,
|
51 |
-
"imageGenerationConfig":
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
|
|
58 |
})
|
59 |
|
60 |
def check_return(result):
|
61 |
if not isinstance(result, bytes):
|
62 |
return None, gr.update(visible=True, value=result)
|
63 |
-
|
64 |
-
return Image.open(io.BytesIO(result)), gr.update(visible=False)
|
65 |
|
66 |
|
67 |
def text_to_image(prompt, negative_text=None, height=1024, width=1024, quality="standard", cfg_scale=8.0, seed=0):
|
@@ -74,21 +98,23 @@ def text_to_image(prompt, negative_text=None, height=1024, width=1024, quality="
|
|
74 |
return check_return(result)
|
75 |
|
76 |
|
77 |
-
def inpainting(
|
78 |
-
|
|
|
|
|
79 |
|
80 |
-
for value in images.values():
|
81 |
-
if len(value) < 200:
|
82 |
-
return None, gr.update(visible=True, value=value)
|
83 |
-
# Prepare the inPaintingParams dictionary
|
84 |
if mask_prompt and mask_image:
|
85 |
raise ValueError("You must specify either maskPrompt or maskImage, but not both.")
|
86 |
if not mask_prompt and not mask_image:
|
87 |
raise ValueError("You must specify either maskPrompt or maskImage.")
|
88 |
-
|
89 |
-
|
|
|
|
|
|
|
90 |
in_painting_params = {
|
91 |
-
|
|
|
92 |
**({"maskPrompt": mask_prompt} if mask_prompt not in [None, ""] else {}),
|
93 |
**({"text": text} if text not in [None, ""] else {}),
|
94 |
**({"negativeText": negative_text} if negative_text not in [None, ""] else {})
|
@@ -99,25 +125,29 @@ def inpainting(image, mask_prompt=None, mask_image=None, text=None, negative_tex
|
|
99 |
|
100 |
return check_return(result)
|
101 |
|
102 |
-
def outpainting(
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
return None, gr.update(visible=True, value=value)
|
107 |
|
108 |
if mask_prompt and mask_image:
|
109 |
raise ValueError("You must specify either maskPrompt or maskImage, but not both.")
|
110 |
if not mask_prompt and not mask_image:
|
111 |
raise ValueError("You must specify either maskPrompt or maskImage.")
|
|
|
|
|
|
|
|
|
|
|
|
|
112 |
|
113 |
# Prepare the outPaintingParams dictionary
|
114 |
out_painting_params = {
|
115 |
-
|
116 |
-
**
|
117 |
-
|
118 |
-
|
119 |
-
|
120 |
-
)
|
121 |
}
|
122 |
|
123 |
body = build_request("OUTPAINTING", out_painting_params, height, width, quality, cfg_scale, seed)
|
@@ -138,6 +168,7 @@ def image_variation(images, text=None, negative_text=None, similarity_strength=0
|
|
138 |
# Prepare the imageVariationParams dictionary
|
139 |
image_variation_params = {
|
140 |
"images": encoded_images,
|
|
|
141 |
**({"text": text} if text not in [None, ""] else {}),
|
142 |
**({"negativeText": negative_text} if negative_text not in [None, ""] else {})
|
143 |
}
|
@@ -148,16 +179,16 @@ def image_variation(images, text=None, negative_text=None, similarity_strength=0
|
|
148 |
return check_return(result)
|
149 |
|
150 |
def image_conditioning(condition_image, text, negative_text=None, control_mode="CANNY_EDGE", control_strength=0.7, height=1024, width=1024, quality="standard", cfg_scale=8.0, seed=0):
|
151 |
-
condition_image_encoded =
|
152 |
-
|
153 |
-
|
154 |
-
|
155 |
# Prepare the textToImageParams dictionary
|
156 |
text_to_image_params = {
|
157 |
"text": text,
|
158 |
"controlMode": control_mode,
|
159 |
"controlStrength": control_strength,
|
160 |
-
"conditionImage": condition_image_encoded
|
161 |
**({"negativeText": negative_text} if negative_text not in [None, ""] else {})
|
162 |
}
|
163 |
body = build_request("TEXT_IMAGE", text_to_image_params, height, width, quality, cfg_scale, seed)
|
@@ -169,19 +200,18 @@ def color_guided_content(text=None, reference_image=None, negative_text=None, co
|
|
169 |
reference_image_str = None
|
170 |
|
171 |
if reference_image is not None and not isinstance(reference_image, type(None)):
|
172 |
-
|
173 |
-
|
174 |
-
|
175 |
-
|
176 |
-
|
177 |
-
reference_image_str = reference_image_encoded.get('image')
|
178 |
if not colors:
|
179 |
colors = "#FF5733,#33FF57,#3357FF,#FF33A1,#33FFF5,#FF8C33,#8C33FF,#33FF8C,#FF3333,#33A1FF"
|
180 |
|
181 |
color_guided_generation_params = {
|
182 |
"text": text,
|
183 |
-
"colors": colors.split(','),
|
184 |
-
**({"referenceImage":
|
185 |
**({"negativeText": negative_text} if negative_text not in [None, ""] else {})
|
186 |
}
|
187 |
|
@@ -191,15 +221,15 @@ def color_guided_content(text=None, reference_image=None, negative_text=None, co
|
|
191 |
return check_return(result)
|
192 |
|
193 |
def background_removal(image):
|
194 |
-
input_image =
|
195 |
-
|
196 |
-
|
197 |
-
|
198 |
|
199 |
body = json.dumps({
|
200 |
"taskType": "BACKGROUND_REMOVAL",
|
201 |
"backgroundRemovalParams": {
|
202 |
-
"image": input_image
|
203 |
}
|
204 |
})
|
205 |
result = generate_image(body)
|
|
|
4 |
import gradio as gr
|
5 |
from PIL import Image
|
6 |
from generate import *
|
7 |
+
import numpy as np
|
8 |
from typing import Dict, Any
|
9 |
from processImage import process_and_encode_image
|
10 |
|
11 |
+
def rgba_to_hex(rgba):
|
12 |
+
r, g, b, _ = [int(float(x)) for x in rgba[5:-1].split(',')]
|
13 |
+
return f"#{r:02X}{g:02X}{b:02X}"
|
14 |
+
|
15 |
+
def add_color_to_list(current_colors, new_color):
|
16 |
+
new_color_hex = rgba_to_hex(new_color)
|
17 |
+
color_list = current_colors.split(',')
|
18 |
+
if new_color_hex not in color_list and len(color_list) < 10:
|
19 |
+
color_list.append(new_color_hex)
|
20 |
+
return ','.join(filter(None, color_list))
|
21 |
+
|
22 |
+
def create_padded_image(image, padding_percent=100):
|
23 |
+
image = image['background']
|
24 |
+
if image.mode != 'RGBA':
|
25 |
+
image = image.convert('RGBA')
|
26 |
+
|
27 |
+
width, height = image.size
|
28 |
+
new_width = int(width * (1 + padding_percent/100))
|
29 |
+
new_height = int(height * (1 + padding_percent/100))
|
30 |
+
|
31 |
+
padded = Image.new('RGBA', (new_width, new_height), (0, 0, 0, 0))
|
32 |
+
|
33 |
+
x_offset = (new_width - width) // 2
|
34 |
+
y_offset = (new_height - height) // 2
|
35 |
+
|
36 |
+
padded.paste(image, (x_offset, y_offset))
|
37 |
+
return padded
|
38 |
+
|
39 |
+
def process_composite_to_mask(original_image, composite_image, transparent=False):
|
40 |
+
original_array = np.array(original_image.convert('RGBA'))
|
41 |
+
if transparent:
|
42 |
+
white_background = Image.new('RGBA', original_image.size, (255, 255, 255, 255))
|
43 |
+
white_background.paste(original_image, (0, 0), original_image)
|
44 |
+
return white_background
|
45 |
+
if composite_image is None:
|
46 |
+
mask = np.full(original_array.shape[:2], 255, dtype=np.uint8) # Start with white
|
47 |
+
transparent_areas = original_array[:, :, 3] == 0 # Alpha channel is 0 for transparent pixels
|
48 |
+
mask[transparent_areas] = 0
|
49 |
else:
|
50 |
+
composite_array = np.array(composite_image.convert('RGBA'))
|
51 |
+
|
52 |
+
difference = np.any(original_array != composite_array, axis=2)
|
53 |
+
mask = np.full(original_array.shape[:2], 255, dtype=np.uint8)
|
54 |
+
mask[difference] = 0
|
55 |
+
|
56 |
+
|
57 |
+
|
58 |
+
|
59 |
+
return Image.fromarray(mask, mode='L')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
60 |
|
61 |
def build_request(task_type, params, height=1024, width=1024, quality="standard", cfg_scale=8.0, seed=0):
|
62 |
+
param_dict = {
|
63 |
+
"TEXT_IMAGE": "textToImageParams",
|
64 |
+
"INPAINTING": "inPaintingParams",
|
65 |
+
"OUTPAINTING": "outPaintingParams",
|
66 |
+
"IMAGE_VARIATION": "imageVariationParams",
|
67 |
+
"COLOR_GUIDED_GENERATION": "colorGuidedGenerationParams",
|
68 |
+
"BACKGROUND_REMOVAL": "backgroundRemovalParams"
|
69 |
+
}
|
70 |
+
|
71 |
return json.dumps({
|
72 |
"taskType": task_type,
|
73 |
param_dict[task_type]: params,
|
74 |
+
"imageGenerationConfig": {
|
75 |
+
"numberOfImages": 1,
|
76 |
+
"height": height,
|
77 |
+
"width": width,
|
78 |
+
"quality": quality,
|
79 |
+
"cfgScale": cfg_scale,
|
80 |
+
"seed": seed
|
81 |
+
}
|
82 |
})
|
83 |
|
84 |
def check_return(result):
|
85 |
if not isinstance(result, bytes):
|
86 |
return None, gr.update(visible=True, value=result)
|
87 |
+
|
88 |
+
return Image.open(io.BytesIO(result)), gr.update(value=None,visible=False)
|
89 |
|
90 |
|
91 |
def text_to_image(prompt, negative_text=None, height=1024, width=1024, quality="standard", cfg_scale=8.0, seed=0):
|
|
|
98 |
return check_return(result)
|
99 |
|
100 |
|
101 |
+
def inpainting(mask_image, mask_prompt=None, text=None, negative_text=None, height=1024, width=1024, quality="standard", cfg_scale=8.0, seed=0):
|
102 |
+
image = process_and_encode_image(mask_image['background'])
|
103 |
+
if len(image) < 200:
|
104 |
+
return None, gr.update(visible=True, value=image)
|
105 |
|
|
|
|
|
|
|
|
|
106 |
if mask_prompt and mask_image:
|
107 |
raise ValueError("You must specify either maskPrompt or maskImage, but not both.")
|
108 |
if not mask_prompt and not mask_image:
|
109 |
raise ValueError("You must specify either maskPrompt or maskImage.")
|
110 |
+
|
111 |
+
if mask_image and 'composite' in mask_image:
|
112 |
+
mask = process_composite_to_mask(mask_image['background'], mask_image['composite'])
|
113 |
+
mask_image = process_and_encode_image(mask)
|
114 |
+
|
115 |
in_painting_params = {
|
116 |
+
"image": image,
|
117 |
+
**({"maskImage": mask_image} if mask_image not in [None, ""] else {}),
|
118 |
**({"maskPrompt": mask_prompt} if mask_prompt not in [None, ""] else {}),
|
119 |
**({"text": text} if text not in [None, ""] else {}),
|
120 |
**({"negativeText": negative_text} if negative_text not in [None, ""] else {})
|
|
|
125 |
|
126 |
return check_return(result)
|
127 |
|
128 |
+
def outpainting(mask_image, mask_prompt=None, text=None, negative_text=None, outpainting_mode="DEFAULT", height=1024, width=1024, quality="standard", cfg_scale=8.0, seed=0):
|
129 |
+
image = process_and_encode_image(mask_image['background'])
|
130 |
+
if len(value) < 200:
|
131 |
+
return None, gr.update(visible=True, value=value)
|
|
|
132 |
|
133 |
if mask_prompt and mask_image:
|
134 |
raise ValueError("You must specify either maskPrompt or maskImage, but not both.")
|
135 |
if not mask_prompt and not mask_image:
|
136 |
raise ValueError("You must specify either maskPrompt or maskImage.")
|
137 |
+
|
138 |
+
if mask_image and 'composite' in mask_image:
|
139 |
+
mask = process_composite_to_mask(mask_image['background'], None)
|
140 |
+
image = process_composite_to_mask(mask_image['background'], None, True)
|
141 |
+
image = process_and_encode_image(image)
|
142 |
+
mask_image = process_and_encode_image(mask)
|
143 |
|
144 |
# Prepare the outPaintingParams dictionary
|
145 |
out_painting_params = {
|
146 |
+
"image": image,
|
147 |
+
**({"maskImage": mask_image} if mask_image not in [None, ""] else {}),
|
148 |
+
**({"maskPrompt": mask_prompt} if mask_prompt not in [None, ""] else {}),
|
149 |
+
**({"text": text} if text not in [None, ""] else {"text": " "}),
|
150 |
+
**({"negativeText": negative_text} if negative_text not in [None, ""] else {})
|
|
|
151 |
}
|
152 |
|
153 |
body = build_request("OUTPAINTING", out_painting_params, height, width, quality, cfg_scale, seed)
|
|
|
168 |
# Prepare the imageVariationParams dictionary
|
169 |
image_variation_params = {
|
170 |
"images": encoded_images,
|
171 |
+
**({"similarityStrength": similarity_strength} if similarity_strength not in [None, ""] else {}),
|
172 |
**({"text": text} if text not in [None, ""] else {}),
|
173 |
**({"negativeText": negative_text} if negative_text not in [None, ""] else {})
|
174 |
}
|
|
|
179 |
return check_return(result)
|
180 |
|
181 |
def image_conditioning(condition_image, text, negative_text=None, control_mode="CANNY_EDGE", control_strength=0.7, height=1024, width=1024, quality="standard", cfg_scale=8.0, seed=0):
|
182 |
+
condition_image_encoded = process_and_encode_image(condition_image)
|
183 |
+
|
184 |
+
if len(condition_image_encoded) < 200:
|
185 |
+
return None, gr.update(visible=True, value=condition_image_encoded)
|
186 |
# Prepare the textToImageParams dictionary
|
187 |
text_to_image_params = {
|
188 |
"text": text,
|
189 |
"controlMode": control_mode,
|
190 |
"controlStrength": control_strength,
|
191 |
+
"conditionImage": condition_image_encoded,
|
192 |
**({"negativeText": negative_text} if negative_text not in [None, ""] else {})
|
193 |
}
|
194 |
body = build_request("TEXT_IMAGE", text_to_image_params, height, width, quality, cfg_scale, seed)
|
|
|
200 |
reference_image_str = None
|
201 |
|
202 |
if reference_image is not None and not isinstance(reference_image, type(None)):
|
203 |
+
reference_image_encoded = process_and_encode_image(reference_image)
|
204 |
+
|
205 |
+
if len(reference_image_encoded) < 200:
|
206 |
+
return None, gr.update(visible=True, value=reference_image_encoded)
|
207 |
+
|
|
|
208 |
if not colors:
|
209 |
colors = "#FF5733,#33FF57,#3357FF,#FF33A1,#33FFF5,#FF8C33,#8C33FF,#33FF8C,#FF3333,#33A1FF"
|
210 |
|
211 |
color_guided_generation_params = {
|
212 |
"text": text,
|
213 |
+
"colors": [color.strip() for color in colors.split(',')],
|
214 |
+
**({"referenceImage": reference_image_encoded} if reference_image_str is not None else {}),
|
215 |
**({"negativeText": negative_text} if negative_text not in [None, ""] else {})
|
216 |
}
|
217 |
|
|
|
221 |
return check_return(result)
|
222 |
|
223 |
def background_removal(image):
|
224 |
+
input_image = process_and_encode_image(image)
|
225 |
+
|
226 |
+
if len(input_image) < 200:
|
227 |
+
return None, gr.update(visible=True, value=input_image)
|
228 |
|
229 |
body = json.dumps({
|
230 |
"taskType": "BACKGROUND_REMOVAL",
|
231 |
"backgroundRemovalParams": {
|
232 |
+
"image": input_image
|
233 |
}
|
234 |
})
|
235 |
result = generate_image(body)
|
generate.py
CHANGED
@@ -173,7 +173,7 @@ def check_rate_limit(body):
|
|
173 |
raise ImageError(rate_limit_message.format('Premium'))
|
174 |
rate_data['premium'].append(current_time)
|
175 |
else: # standard
|
176 |
-
if len(rate_data['standard']) >=
|
177 |
raise ImageError(rate_limit_message.format('Standard'))
|
178 |
rate_data['standard'].append(current_time)
|
179 |
|
|
|
173 |
raise ImageError(rate_limit_message.format('Premium'))
|
174 |
rate_data['premium'].append(current_time)
|
175 |
else: # standard
|
176 |
+
if len(rate_data['standard']) >= 100:
|
177 |
raise ImageError(rate_limit_message.format('Standard'))
|
178 |
rate_data['standard'].append(current_time)
|
179 |
|