Upload 4 files
Scripts/run_sdxl_creaprompt.py
ADDED
@@ -0,0 +1,72 @@
from daam import trace, set_seed
from diffusers import StableDiffusionXLPipeline
from matplotlib import pyplot as plt
import torch
import os

# Verify GPU availability
if not torch.cuda.is_available():
    raise RuntimeError("CUDA is not available. Please ensure a GPU is available and PyTorch is installed with CUDA support.")
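
# Optional (sketch): report which GPU was detected and its total VRAM before
# loading the model. Uses only standard torch.cuda calls.
props = torch.cuda.get_device_properties(0)
print(f"Using GPU: {props.name} ({props.total_memory / 1e9:.1f} GB VRAM)")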

# Create output directory
output_dir = 'sdxl-creaprompt'
os.makedirs(output_dir, exist_ok=True)  # Create 'sdxl-creaprompt' folder if it doesn't exist

# Model setup
model_url = 'https://huggingface.co/ApacheOne/local-checkpoints/blob/main/SDXL(PONY)/creapromptLightning_creapromtHypersdxlV1.safetensors'
device = 'cuda'  # Explicitly set to GPU

# Load the pipeline from a single .safetensors file
pipe = StableDiffusionXLPipeline.from_single_file(
    model_url,
    torch_dtype=torch.float16,  # Use float16 for faster inference on GPU
    use_safetensors=True,       # Ensure safetensors format
    variant='fp16'              # FP16 variant for efficiency
)

# GPU-specific optimizations
pipe.enable_model_cpu_offload()  # Offload idle submodules to CPU when VRAM is low
pipe.enable_vae_slicing()        # Slice VAE operations to reduce memory usage
# Note: pipe.to(device) is intentionally not called here; enable_model_cpu_offload()
# manages device placement itself, and calling .to() afterwards conflicts with the
# offload hooks.

# Prompt and generation settings
prompt = 'realism eohwx woman, wearing dark black low waist jeans, white shoes and red crop top, hands by side, full body shot, Lake Tahoe, (masterpiece best quality ultra-detailed best shadow amazing realistic picture)'
gen = set_seed(42)  # Reproducible seed

# Generate image and heatmaps
with torch.no_grad():
    with trace(pipe) as tc:
        out = pipe(
            prompt,
            num_inference_steps=6,  # Reduced steps for faster generation (increase to 30-50 for better quality)
            generator=gen,
            callback=tc.time_callback,
            callback_steps=1
        )
        # Save the generated image
        generated_image_path = os.path.join(output_dir, 'generated_image.png')
        out.images[0].save(generated_image_path)

        # Generate and save heatmaps
        heat_map = tc.compute_global_heat_map()
        for word in prompt.split():
            try:
                word_heat_map = heat_map.compute_word_heat_map(word)
            except ValueError:
                continue  # skip fragments DAAM cannot align with a token, e.g. '(masterpiece'

            # Create the heatmap overlay plot
            fig = plt.figure()
            word_heat_map.plot_overlay(out.images[0])
            plt.title(f"Heatmap for '{word}'")

            # Save the heatmap as a PNG
            heatmap_path = os.path.join(output_dir, f'heatmap_{word}.png')
            plt.savefig(heatmap_path, bbox_inches='tight')
            plt.close(fig)  # Close the figure to free memory

        # Save the experiment
        exp = tc.to_experiment('sdxl-creaprompt-experiment-gpu')
        exp.save()  # Saves to 'sdxl-creaprompt-experiment-gpu' folder

print(f"Generation complete! Images saved in '{output_dir}' folder:")
print(f"- Generated image: {generated_image_path}")
print(f"- Heatmaps: {output_dir}/heatmap_<word>.png")
print("Experiment saved in 'sdxl-creaprompt-experiment-gpu'.")
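
# Optional (sketch): tile the per-word heatmaps saved above into one contact
# sheet for quick side-by-side inspection. Pure matplotlib; no extra dependencies.
import glob
import math
import matplotlib.image as mpimg

paths = sorted(glob.glob(os.path.join(output_dir, 'heatmap_*.png')))
if paths:
    cols = 4
    rows = math.ceil(len(paths) / cols)
    fig, axes = plt.subplots(rows, cols, figsize=(4 * cols, 4 * rows))
    for ax, path in zip(axes.flat, paths):
        ax.imshow(mpimg.imread(path))
        ax.set_title(os.path.basename(path), fontsize=8)
    for ax in axes.flat:
        ax.axis('off')
    fig.savefig(os.path.join(output_dir, 'heatmap_grid.png'), bbox_inches='tight')
    plt.close(fig)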
Scripts/run_sdxl_creapromptVAE.py
ADDED
@@ -0,0 +1,90 @@
from daam import trace, set_seed
from diffusers import StableDiffusionXLPipeline, DPMSolverMultistepScheduler, AutoencoderKL
from matplotlib import pyplot as plt
import torch
import os

# Verify GPU availability
if not torch.cuda.is_available():
    raise RuntimeError("CUDA is not available. Please ensure a GPU is available and PyTorch is installed with CUDA support.")

# Create output directory
output_dir = 'sdxl-creaprompt'
os.makedirs(output_dir, exist_ok=True)  # Create 'sdxl-creaprompt' folder if it doesn't exist

# Model setup
model_url = 'https://huggingface.co/ApacheOne/local-checkpoints/blob/main/SDXL(PONY)/creapromptLightning_creapromtHypersdxlV1.safetensors'
vae_url = 'https://huggingface.co/ApacheOne/local-checkpoints/blob/main/SDXL(PONY)/VAES/_bothyper.safetensors'
device = 'cuda'  # Explicitly set to GPU
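
# Optional (sketch): pre-fetch both checkpoints with huggingface_hub so repeat
# runs start faster. hf_hub_download stores files in the shared HF cache; whether
# the from_single_file calls below reuse that cache is an assumption about
# diffusers internals, so at worst this only warms the download.
from huggingface_hub import hf_hub_download
hf_hub_download(repo_id='ApacheOne/local-checkpoints',
                filename='SDXL(PONY)/creapromptLightning_creapromtHypersdxlV1.safetensors')
hf_hub_download(repo_id='ApacheOne/local-checkpoints',
                filename='SDXL(PONY)/VAES/_bothyper.safetensors')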

# Load the custom VAE
vae = AutoencoderKL.from_single_file(
    vae_url,
    torch_dtype=torch.float16,  # Match the pipeline's dtype
    use_safetensors=True
)

# Load the pipeline with the custom VAE
pipe = StableDiffusionXLPipeline.from_single_file(
    model_url,
    torch_dtype=torch.float16,  # Use float16 for faster inference on GPU
    use_safetensors=True,       # Ensure safetensors format
    variant='fp16',             # FP16 variant for efficiency
    vae=vae                     # Pass the custom VAE
)
# Set the scheduler to the SDE variant of DPMSolverMultistepScheduler (the
# diffusers counterpart of the "dpmpp_sde" sampler) with the "normal" schedule
pipe.scheduler = DPMSolverMultistepScheduler.from_config(
    pipe.scheduler.config,
    algorithm_type='sde-dpmsolver++',  # dpmpp_sde
    use_karras_sigmas=False            # "normal" variant (linear schedule, not Karras)
)
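
# Optional (sketch): list the scheduler classes this pipeline accepts, which helps
# when mapping a UI sampler name like "dpmpp_sde" onto a diffusers class.
print([s.__name__ for s in pipe.scheduler.compatibles])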

# GPU-specific optimizations
pipe.enable_model_cpu_offload()  # Offload idle submodules to CPU when VRAM is low
pipe.enable_vae_slicing()        # Slice VAE operations to reduce memory usage
# Note: pipe.to(device) is intentionally not called here; enable_model_cpu_offload()
# manages device placement itself, and calling .to() afterwards conflicts with the
# offload hooks.

# Prompt and generation settings
prompt = '(masterpiece best quality ultra-detailed best shadow amazing realistic picture) realistic woman, full body, white background'
gen = set_seed(42)  # Reproducible seed

# Generate image and heatmaps
with torch.no_grad():
    with trace(pipe) as tc:
        out = pipe(
            prompt,
            num_inference_steps=9,  # Reduced steps for faster generation (increase to 30-50 for better quality)
            generator=gen,
            callback=tc.time_callback,
            callback_steps=1,
            guidance_scale=1.1,  # Low CFG scale of 1.1
            height=1024,         # Set height to 1024
            width=1024           # Set width to 1024
        )
        # Save the generated image
        generated_image_path = os.path.join(output_dir, 'generated_image.png')
        out.images[0].save(generated_image_path)

        # Generate and save heatmaps
        heat_map = tc.compute_global_heat_map()
        for word in prompt.split():
            try:
                word_heat_map = heat_map.compute_word_heat_map(word)
            except ValueError:
                continue  # skip fragments DAAM cannot align with a token, e.g. '(masterpiece'

            # Create the heatmap overlay plot
            fig = plt.figure()
            word_heat_map.plot_overlay(out.images[0])
            plt.title(f"Heatmap for '{word}'")

            # Save the heatmap as a PNG
            heatmap_path = os.path.join(output_dir, f'heatmap_{word}.png')
            plt.savefig(heatmap_path, bbox_inches='tight')
            plt.close(fig)  # Close the figure to free memory

        # Save the experiment
        exp = tc.to_experiment('sdxl-creaprompt-experiment-gpu')
        exp.save()  # Saves to 'sdxl-creaprompt-experiment-gpu' folder

print(f"Generation complete! Images saved in '{output_dir}' folder:")
print(f"- Generated image: {generated_image_path}")
print(f"- Heatmaps: {output_dir}/heatmap_<word>.png")
print("Experiment saved in 'sdxl-creaprompt-experiment-gpu'.")
Scripts/run_sdxl_creapromptbest.py
ADDED
@@ -0,0 +1,81 @@
from daam import trace, set_seed
from diffusers import StableDiffusionXLPipeline, DPMSolverMultistepScheduler
from matplotlib import pyplot as plt
import torch
import os

# Verify GPU availability
if not torch.cuda.is_available():
    raise RuntimeError("CUDA is not available. Please ensure a GPU is available and PyTorch is installed with CUDA support.")

# Create output directory
output_dir = 'sdxl-creaprompt'
os.makedirs(output_dir, exist_ok=True)  # Create 'sdxl-creaprompt' folder if it doesn't exist

# Model setup
model_url = 'https://huggingface.co/ApacheOne/local-checkpoints/blob/main/SDXL(PONY)/creapromptLightning_creapromtHypersdxlV1.safetensors'
device = 'cuda'  # Explicitly set to GPU

# Load the pipeline from a single .safetensors file
pipe = StableDiffusionXLPipeline.from_single_file(
    model_url,
    torch_dtype=torch.float16,  # Use float16 for faster inference on GPU
    use_safetensors=True,       # Ensure safetensors format
    variant='fp16'              # FP16 variant for efficiency
)

# Set the scheduler to the SDE variant of DPMSolverMultistepScheduler (the
# diffusers counterpart of the "dpmpp_sde" sampler) with the "normal" schedule
pipe.scheduler = DPMSolverMultistepScheduler.from_config(
    pipe.scheduler.config,
    algorithm_type='sde-dpmsolver++',  # dpmpp_sde
    use_karras_sigmas=False            # "normal" variant (linear schedule, not Karras)
)

# GPU-specific optimizations
pipe.enable_model_cpu_offload()  # Offload idle submodules to CPU when VRAM is low
pipe.enable_vae_slicing()        # Slice VAE operations to reduce memory usage
# Note: pipe.to(device) is intentionally not called here; enable_model_cpu_offload()
# manages device placement itself, and calling .to() afterwards conflicts with the
# offload hooks.

# Prompt and generation settings
prompt = 'realism woman, wearing dark black low waist jeans, white shoes and red crop top, hands by side, full body shot, Lake Tahoe, (masterpiece best quality ultra-detailed best shadow amazing realistic picture)'
gen = set_seed(42)  # Reproducible seed

# Generate image and heatmaps
with torch.no_grad():
    with trace(pipe) as tc:
        out = pipe(
            prompt,
            num_inference_steps=13,  # Reduced steps for faster generation (increase to 30-50 for better quality)
            generator=gen,
            callback=tc.time_callback,
            callback_steps=1,
            guidance_scale=1.9,  # CFG scale of 1.9
            height=1024,         # Set height to 1024
            width=1024           # Set width to 1024
        )
        # Save the generated image
        generated_image_path = os.path.join(output_dir, 'generated_image.png')
        out.images[0].save(generated_image_path)

        # Generate and save heatmaps
        heat_map = tc.compute_global_heat_map()
        for word in prompt.split():
            try:
                word_heat_map = heat_map.compute_word_heat_map(word)
            except ValueError:
                continue  # skip fragments DAAM cannot align with a token, e.g. '(masterpiece'

            # Create the heatmap overlay plot
            fig = plt.figure()
            word_heat_map.plot_overlay(out.images[0])
            plt.title(f"Heatmap for '{word}'")

            # Save the heatmap as a PNG
            heatmap_path = os.path.join(output_dir, f'heatmap_{word}.png')
            plt.savefig(heatmap_path, bbox_inches='tight')
            plt.close(fig)  # Close the figure to free memory

        # Save the experiment
        exp = tc.to_experiment('sdxl-creaprompt-experiment-gpu')
        exp.save()  # Saves to 'sdxl-creaprompt-experiment-gpu' folder

print(f"Generation complete! Images saved in '{output_dir}' folder:")
print(f"- Generated image: {generated_image_path}")
print(f"- Heatmaps: {output_dir}/heatmap_<word>.png")
print("Experiment saved in 'sdxl-creaprompt-experiment-gpu'.")
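
# Optional (sketch): the sibling scripts disagree on CFG (1.1 vs 1.9), so a quick
# sweep over guidance_scale makes the tradeoff visible. Reuses the pipeline and
# settings above; only the scale varies.
for cfg in (1.1, 1.5, 1.9):
    image = pipe(prompt, num_inference_steps=13, generator=set_seed(42),
                 guidance_scale=cfg, height=1024, width=1024).images[0]
    image.save(os.path.join(output_dir, f'generated_cfg_{cfg}.png'))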
Scripts/run_sdxl_with_daam_gpu.py
ADDED
@@ -0,0 +1,72 @@
from daam import trace, set_seed
from diffusers import DiffusionPipeline
from matplotlib import pyplot as plt
import torch
import os

# Verify GPU availability
if not torch.cuda.is_available():
    raise RuntimeError("CUDA is not available. Please ensure a GPU is available and PyTorch is installed with CUDA support.")

# Create output directory
output_dir = 'sdxl'
os.makedirs(output_dir, exist_ok=True)  # Create 'sdxl' folder if it doesn't exist

# Model setup
model_id = 'stabilityai/stable-diffusion-xl-base-1.0'
device = 'cuda'  # Explicitly set to GPU

# Load the pipeline with float16 for GPU
pipe = DiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16,  # Use float16 for faster inference and lower memory usage on GPU
    use_safetensors=True,       # Safetensors for faster loading
    variant='fp16'              # FP16 variant for efficiency
)

# GPU-specific optimizations
pipe.enable_model_cpu_offload()  # Offload idle submodules to CPU when VRAM is low
pipe.enable_vae_slicing()        # Slice VAE operations to reduce memory usage
# Note: pipe.to(device) is intentionally not called here; enable_model_cpu_offload()
# manages device placement itself, and calling .to() afterwards conflicts with the
# offload hooks.

# Prompt and generation settings
prompt = 'A human holding his hand up'
gen = set_seed(42)  # Reproducible seed

# Generate image and heatmaps
with torch.no_grad():
    with trace(pipe) as tc:
        out = pipe(
            prompt,
            num_inference_steps=15,  # Reduced steps for faster generation (increase to 30-50 for better quality)
            generator=gen,
            callback=tc.time_callback,
            callback_steps=1
        )
        # Save the generated image
        generated_image_path = os.path.join(output_dir, 'generated_image.png')
        out.images[0].save(generated_image_path)

        # Generate and save heatmaps
        heat_map = tc.compute_global_heat_map()
        for word in prompt.split():
            try:
                word_heat_map = heat_map.compute_word_heat_map(word)
            except ValueError:
                continue  # skip words DAAM cannot align with a token

            # Create the heatmap overlay plot
            fig = plt.figure()
            word_heat_map.plot_overlay(out.images[0])
            plt.title(f"Heatmap for '{word}'")

            # Save the heatmap as a PNG
            heatmap_path = os.path.join(output_dir, f'heatmap_{word}.png')
            plt.savefig(heatmap_path, bbox_inches='tight')
            plt.close(fig)  # Close the figure to free memory

        # Save the experiment
        exp = tc.to_experiment('sdxl-cat-experiment-gpu')
        exp.save()  # Saves to 'sdxl-cat-experiment-gpu' folder

print(f"Generation complete! Images saved in '{output_dir}' folder:")
print(f"- Generated image: {generated_image_path}")
print(f"- Heatmaps: {output_dir}/heatmap_<word>.png")
print("Experiment saved in 'sdxl-cat-experiment-gpu'.")
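
# Optional (sketch): report peak GPU memory used by the run, handy when tuning
# the offload/slicing options above. Standard torch.cuda call.
print(f"Peak VRAM allocated: {torch.cuda.max_memory_allocated() / 1e9:.2f} GB")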