Implement Image-to-Image
- utils/ai_generator.py +8 -1
- utils/ai_generator_diffusers_flux.py +40 -13
utils/ai_generator.py
CHANGED
@@ -32,19 +32,26 @@ def generate_ai_image(
     neg_prompt_textbox_value,
     model,
     lora_weights=None,
+    conditioned_image=None,
+    pipeline = "FluxPipeline",
     *args,
     **kwargs
 ):
     seed = random.randint(1, 99999)
     if torch.cuda.is_available():
         print("Local GPU available. Generating image locally.")
+        if conditioned_image is not None:
+            pipeline = "FluxImg2ImgPipeline"
         return generate_ai_image_local(
             map_option,
             prompt_textbox_value,
             neg_prompt_textbox_value,
             model,
             lora_weights=lora_weights,
-            seed=seed
+            seed=seed,
+            conditioned_image=conditioned_image,
+            pipeline_name=pipeline,
+            strength=0.5
         )
     else:
         print("No local GPU available. Sending request to Hugging Face API.")
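With this change, passing a conditioned image through the existing entry point is enough to flip the local path over to img2img. Note that generate_ai_image hard-codes strength=0.5 here, overriding the 0.75 default that generate_ai_image_local declares in the second file. A minimal usage sketch follows; it assumes the signature's leading parameters are map_option and prompt_textbox_value, as the forwarding call above suggests, and the image path and prompt are made up:

# Hypothetical call into the new img2img path.
from PIL import Image
from utils.ai_generator import generate_ai_image

source = Image.open("input.png")            # any PIL image; path is illustrative
result = generate_ai_image(
    "Prompt",                               # map_option
    "a castle on a cliff at dusk",          # prompt_textbox_value
    None,                                   # neg_prompt_textbox_value
    "black-forest-labs/FLUX.1-dev",         # model
    conditioned_image=source,               # non-None selects FluxImg2ImgPipeline
)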
utils/ai_generator_diffusers_flux.py
CHANGED
@@ -1,11 +1,11 @@
 # utils/ai_generator_diffusers_flux.py
 import os
 import torch
+from diffusers import FluxPipeline,FluxImg2ImgPipeline
 import accelerate
 import transformers
 import safetensors
 import xformers
-from diffusers import FluxPipeline
 from diffusers.utils import load_image
 # from huggingface_hub import hf_hub_download
 from PIL import Image
@@ -31,6 +31,12 @@ warnings.filterwarnings("ignore", message=".*Torch was not compiled with flash a
 #print(torch.__version__) # Ensure it's 2.0 or newer
 #print(torch.cuda.is_available()) # Ensure CUDA is available
 
+PIPELINE_CLASSES = {
+    "FluxPipeline": FluxPipeline,
+    "FluxImg2ImgPipeline": FluxImg2ImgPipeline
+}
+
+
 def generate_image_from_text(
     text,
     model_name="black-forest-labs/FLUX.1-dev",
@@ -101,15 +107,22 @@ def generate_image_lowmem(
     num_inference_steps=50,
     seed=0,
     true_cfg_scale=1.0,
+    pipeline_name="FluxPipeline",
+    strength=0.75,
     additional_parameters=None
 ):
+    # Retrieve the pipeline class from the mapping
+    pipeline_class = PIPELINE_CLASSES.get(pipeline_name)
+    if not pipeline_class:
+        raise ValueError(f"Unsupported pipeline type '{pipeline_name}'. "
+                         f"Available options: {list(PIPELINE_CLASSES.keys())}")
     device = "cuda" if torch.cuda.is_available() else "cpu"
     print(f"device:{device}\nmodel_name:{model_name}\n")
     print(f"\n {get_torch_info()}\n")
     # Disable gradient calculations
     with torch.no_grad():
         # Initialize the pipeline inside the context manager
-        pipe = FluxPipeline.from_pretrained(
+        pipe = pipeline_class.from_pretrained(
             model_name,
             torch_dtype=torch.bfloat16 if device == "cuda" else torch.bfloat32
         ).to(device)
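One pre-existing issue this hunk carries forward: torch.bfloat32 is not a dtype PyTorch defines (the available names are torch.bfloat16 and torch.float32), so the CPU branch of the dtype selection raises AttributeError as soon as it is evaluated. A sketch of the conventional selection, with torch.float32 as the CPU fallback:

# Dtype selection sketch: bfloat16 on CUDA, standard float32 on CPU.
import torch

torch_dtype = torch.bfloat16 if torch.cuda.is_available() else torch.float32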
@@ -125,7 +138,8 @@ def generate_image_lowmem(
     else:
         pipe.attn_implementation="flash_attention_2"
         print("\nEnabled flash_attention_2.\n")
-
+    if pipeline_name == "FluxPipeline":
+        pipe.enable_vae_tiling()
     # Load LoRA weights
     if lora_weights:
         for lora_weight in lora_weights:
@@ -163,22 +177,30 @@ def generate_image_lowmem(
         generator = torch.Generator(device=device).manual_seed(seed)
         conditions = []
         if conditioned_image is not None:
-            conditioned_image = crop_and_resize_image(conditioned_image,
+            conditioned_image = crop_and_resize_image(conditioned_image, image_width, image_height)
             condition = Condition("subject", conditioned_image)
             conditions.append(condition)
-
-
+            print(f"\nAdded conditioned image.\n {conditioned_image.size}")
+            # Prepare the parameters for image generation
+            additional_parameters ={
+                "strength": strength,
+                "image": conditioned_image,
+            }
+        else:
+            print("\nNo conditioned image provided.")
+            if neg_prompt!=None:
+                true_cfg_scale=1.1
+                additional_parameters ={
+                    "negative_prompt": neg_prompt,
+                    "true_cfg_scale": true_cfg_scale,
+                }
         generate_params = {
-            "prompt": text,
-            "negative_prompt": neg_prompt,
-            "true_cfg_scale": true_cfg_scale,
+            "prompt": text,
             "height": image_height,
             "width": image_width,
             "guidance_scale": guidance_scale,
             "num_inference_steps": num_inference_steps,
-            "generator": generator,
-            "conditions": conditions if conditions else None
-        }
+            "generator": generator, }
         if additional_parameters:
             generate_params.update(additional_parameters)
         generate_params = {k: v for k, v in generate_params.items() if v is not None}
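Two behaviors are worth noting in this hunk, both visible in the diff itself: the negative prompt and true_cfg_scale are now applied only when no conditioned image is supplied (the else branch), and whichever branch runs reassigns additional_parameters, discarding anything the caller passed in through that argument. For the img2img branch, the merged dictionary ends up equivalent to the sketch below, assuming, as the surrounding code suggests, that generate_params is ultimately unpacked into the pipeline call:

# Effective parameters for the img2img branch after the update/filter steps.
# FluxImg2ImgPipeline accepts all of these keyword arguments.
generate_params = {
    "prompt": text,
    "height": image_height,
    "width": image_width,
    "guidance_scale": guidance_scale,
    "num_inference_steps": num_inference_steps,
    "generator": generator,
    "strength": strength,            # injected via additional_parameters
    "image": conditioned_image,      # injected via additional_parameters
}
image = pipe(**generate_params).images[0]  # presumed call site, not shown in the hunk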
@@ -206,7 +228,9 @@ def generate_ai_image_local (
     width=896,
     num_inference_steps=50,
     guidance_scale=3.5,
-    seed=777
+    seed=777,
+    pipeline_name="FluxPipeline",
+    strength=0.75,
 ):
     try:
         if map_option != "Prompt":
@@ -246,6 +270,8 @@ def generate_ai_image_local (
     print(f"Guidance Scale: {guidance_scale}")
     print(f"Seed: {seed}")
     print(f"Additional Parameters: {additional_parameters}")
+    print(f"Conditioned Image: {conditioned_image}")
+    print(f"pipeline: {pipeline_name}")
     image = generate_image_lowmem(
         text=prompt,
         model_name=model,
@@ -257,6 +283,7 @@ def generate_ai_image_local (
         guidance_scale=guidance_scale,
         num_inference_steps=num_inference_steps,
         seed=seed,
+        pipeline_name=pipeline_name,
         additional_parameters=additional_parameters
     )
     with NamedTemporaryFile(delete=False, suffix=".png") as tmp:
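Also worth flagging: the visible hunks show strength being accepted by generate_ai_image_local but not forwarded in the generate_image_lowmem(...) call, so unless it is passed on a line outside these hunks, the 0.5 supplied by generate_ai_image never reaches the pipeline and the 0.75 default applies. For reference, the plain-diffusers equivalent of the new img2img path looks like this (a standalone sketch, independent of the repo's helpers; the input URL is a placeholder):

import torch
from diffusers import FluxImg2ImgPipeline
from diffusers.utils import load_image

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = FluxImg2ImgPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    torch_dtype=torch.bfloat16 if device == "cuda" else torch.float32,
).to(device)

init_image = load_image("https://example.com/input.png")  # placeholder URL
image = pipe(
    prompt="a castle on a cliff at dusk",
    image=init_image,
    strength=0.5,
    guidance_scale=3.5,
    num_inference_steps=50,
    generator=torch.Generator(device=device).manual_seed(777),
).images[0]
image.save("output.png")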