# HexaGrid / app.py
import os
import numpy as np
import torch
import gradio as gr
# Import shared constants
import utils.constants as constants
from PIL import Image
from haishoku.haishoku import Haishoku
from tempfile import NamedTemporaryFile
#from pathlib import Path
import atexit
import random
import logging
import accelerate
from transformers import AutoTokenizer
import gc
IS_SHARED_SPACE = constants.IS_SHARED_SPACE
# Import functions from modules
from utils.file_utils import cleanup_temp_files
from utils.color_utils import (
hex_to_rgb,
detect_color_format,
update_color_opacity,
)
from utils.misc import (
    get_filename,
    pause,
    convert_ratio_to_dimensions,
    install_cuda_toolkit,
    install_torch,
    _get_output,
    setup_runtime_env,
)
from utils.depth_estimation import estimate_depth, create_3d_model, generate_depth_and_3d, generate_depth_button_click
from utils.image_utils import (
change_color,
open_image,
build_prerendered_images,
upscale_image,
lerp_imagemath,
shrink_and_paste_on_blank,
show_lut,
apply_lut_to_image_path,
multiply_and_blend_images,
alpha_composite_with_control,
crop_and_resize_image
)
from utils.hex_grid import (
generate_hexagon_grid,
generate_hexagon_grid_interface,
)
from utils.excluded_colors import (
add_color,
delete_color,
build_dataframe,
on_input,
excluded_color_list,
on_color_display_select
)
# from utils.ai_generator import (
# generate_ai_image,
# )
from utils.version_info import (
versions_html,
#initialize_cuda,
#release_torch_resources,
get_torch_info
)
from utils.lora_details import (
upd_prompt_notes,
split_prompt_precisely,
approximate_token_count,
get_trigger_words
)
from diffusers import FluxPipeline, FluxImg2ImgPipeline, FluxControlPipeline
PIPELINE_CLASSES = {
"FluxPipeline": FluxPipeline,
"FluxImg2ImgPipeline": FluxImg2ImgPipeline,
"FluxControlPipeline": FluxControlPipeline
}
import spaces
input_image_palette = []
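# Module-level state holding the currently selected pre-rendered image path;
# it is updated by the gallery selection handler inside main().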
current_prerendered_image = gr.State("./images/images/Beeuty-1.png")
# Register the cleanup function
atexit.register(cleanup_temp_files)
def hex_create(hex_size, border_size, input_image_path, start_x, start_y, end_x, end_y, rotation, background_color_hex, background_opacity, border_color_hex, border_opacity, fill_hex, excluded_colors_var, filter_color, x_spacing, y_spacing, add_hex_text_option=None, custom_text_list=None, custom_text_color_list=None):
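    """
    Overlay a customizable hexagon grid on the image at input_image_path.

    The image is centered on a transparent canvas of at least 1344x768 pixels,
    its palette is extracted with Haishoku, and the grid itself is rendered by
    generate_hexagon_grid_interface with the given colors, spacing, and text
    options. Returns the generated grid/overlay images.
    """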
global input_image_palette
try:
# Load and process the input image
input_image = Image.open(input_image_path).convert("RGBA")
except Exception as e:
print(f"Failed to convert image to RGBA: {e}")
# Open the original image without conversion
input_image = Image.open(input_image_path)
# Ensure the canvas is at least 1344x768 pixels
min_width, min_height = 1344, 768
canvas_width = max(min_width, input_image.width)
canvas_height = max(min_height, input_image.height)
# Create a transparent canvas with the required dimensions
new_canvas = Image.new("RGBA", (canvas_width, canvas_height), (0, 0, 0, 0))
# Calculate position to center the input image on the canvas
paste_x = (canvas_width - input_image.width) // 2
paste_y = (canvas_height - input_image.height) // 2
# Paste the input image onto the canvas
new_canvas.paste(input_image, (paste_x, paste_y))
# Save the 'RGBA' image to a temporary file and update 'input_image_path'
with NamedTemporaryFile(delete=False, suffix=".png") as tmp_file:
new_canvas.save(tmp_file.name, format="PNG")
input_image_path = tmp_file.name
constants.temp_files.append(tmp_file.name)
# Update 'input_image' with the new image as a file path
input_image = Image.open(input_image_path)
# Use Haishoku to get the palette from the new image
input_palette = Haishoku.loadHaishoku(input_image_path)
input_image_palette = input_palette.palette
# Update colors with opacity
background_color = update_color_opacity(
hex_to_rgb(background_color_hex),
int(background_opacity * (255 / 100))
)
border_color = update_color_opacity(
hex_to_rgb(border_color_hex),
int(border_opacity * (255 / 100))
)
    # Prepare the excluded colors list (local name avoids shadowing the imported excluded_color_list)
    excluded_colors = [tuple(lst) for lst in excluded_colors_var]
# Generate the hexagon grid images
grid_image = generate_hexagon_grid_interface(
hex_size,
border_size,
input_image,
start_x,
start_y,
end_x,
end_y,
rotation,
background_color,
border_color,
fill_hex,
        excluded_colors,
filter_color,
x_spacing,
y_spacing,
add_hex_text_option,
custom_text_list,
custom_text_color_list
)
return grid_image
def get_model_and_lora(model_textbox):
"""
Determines the model and LoRA weights based on the model_textbox input.
wieghts must be in an array ["Borcherding/FLUX.1-dev-LoRA-FractalLand-v0.1"]
"""
# If the input is in the list of models, return it with None as LoRA weights
if model_textbox in constants.MODELS:
return model_textbox, []
# If the input is in the list of LoRA weights, get the corresponding model
elif model_textbox in constants.LORA_WEIGHTS:
model = constants.LORA_TO_MODEL.get(model_textbox)
return model, model_textbox.split()
else:
        # Unrecognized input: treat it as a model identifier with no LoRA weights
        default_model = model_textbox
return default_model, []
@spaces.GPU(progress=gr.Progress(track_tqdm=True))
def generate_image_lowmem(
text,
neg_prompt=None,
model_name="black-forest-labs/FLUX.1-dev",
lora_weights=None,
conditioned_image=None,
image_width=1368,
image_height=848,
guidance_scale=3.5,
num_inference_steps=30,
seed=0,
true_cfg_scale=1.0,
pipeline_name="FluxPipeline",
strength=0.75,
additional_parameters=None,
progress=gr.Progress(track_tqdm=True)
):
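    """
    Generate an image with a FLUX pipeline while minimizing GPU memory use:
    bfloat16 weights on CUDA, model CPU offload, VAE slicing/tiling, and the
    pipeline is deleted and the CUDA cache cleared after each call.
    Returns a single PIL.Image.
    """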
#from torch import cuda, bfloat16, float32, Generator, no_grad, backends
# Retrieve the pipeline class from the mapping
pipeline_class = PIPELINE_CLASSES.get(pipeline_name)
if not pipeline_class:
raise ValueError(f"Unsupported pipeline type '{pipeline_name}'. "
f"Available options: {list(PIPELINE_CLASSES.keys())}")
#initialize_cuda()
device = "cuda" if torch.cuda.is_available() else "cpu"
from src.condition import Condition
print(f"device:{device}\nmodel_name:{model_name}\nlora_weights:{lora_weights}\n")
#print(f"\n {get_torch_info()}\n")
# Disable gradient calculations
with torch.no_grad():
# Initialize the pipeline inside the context manager
pipe = pipeline_class.from_pretrained(
model_name,
torch_dtype=torch.bfloat16 if device == "cuda" else torch.float32
).to(device)
# Optionally, don't use CPU offload if not necessary
# alternative version that may be more efficient
# pipe.enable_sequential_cpu_offload()
if pipeline_name == "FluxPipeline":
pipe.enable_model_cpu_offload()
pipe.vae.enable_slicing()
pipe.vae.enable_tiling()
else:
pipe.enable_model_cpu_offload()
# Access the tokenizer from the pipeline
tokenizer = pipe.tokenizer
# Check if add_prefix_space is set and convert to slow tokenizer if necessary
if getattr(tokenizer, 'add_prefix_space', False):
            tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False, device_map='cpu')
# Update the pipeline's tokenizer
pipe.tokenizer = tokenizer
pipe.to(device)
flash_attention_enabled = torch.backends.cuda.flash_sdp_enabled()
        if not flash_attention_enabled:
            # xFormers memory-efficient attention could optionally be enabled here:
            # pipe.enable_xformers_memory_efficient_attention()
            print("\nFlash attention not available; xFormers memory-efficient attention left disabled.\n")
        else:
            pipe.attn_implementation = "flash_attention_2"
            print("\nEnabled flash_attention_2.\n")
condition_type = "subject"
# Load LoRA weights
# note: does not yet handle multiple LoRA weights with different names, needs .set_adapters(["depth", "hyper-sd"], adapter_weights=[0.85, 0.125])
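        # Illustrative shape of a constants.LORA_DETAILS entry (the real entries
        # live in utils/constants.py; the keys shown here are the ones read below):
        #   "repo/lora-name": [
        #       {"weight_name": "lora.safetensors", "adapter_name": "name", "lora_collection": "repo/collection"},
        #       {"pipe": {"method_name": {"param": value}}},
        #       {"condition_type": "subject"},
        #   ]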
if lora_weights:
for lora_weight in lora_weights:
lora_configs = constants.LORA_DETAILS.get(lora_weight, [])
lora_weight_set = False
if lora_configs:
for config in lora_configs:
# Load LoRA weights with optional weight_name and adapter_name
if 'weight_name' in config:
weight_name = config.get("weight_name")
adapter_name = config.get("adapter_name")
lora_collection = config.get("lora_collection")
                            if weight_name and adapter_name and lora_collection and not lora_weight_set:
                                pipe.load_lora_weights(
                                    lora_collection,
                                    weight_name=weight_name,
                                    adapter_name=adapter_name,
                                    token=constants.HF_API_TOKEN
                                )
                                lora_weight_set = True
                                print(f"\npipe.load_lora_weights({lora_collection}, weight_name={weight_name}, adapter_name={adapter_name})\n")
                            elif weight_name and adapter_name is None and lora_collection and not lora_weight_set:
                                pipe.load_lora_weights(
                                    lora_collection,
                                    weight_name=weight_name,
                                    token=constants.HF_API_TOKEN
                                )
                                lora_weight_set = True
                                print(f"\npipe.load_lora_weights({lora_collection}, weight_name={weight_name})\n")
                            elif weight_name and adapter_name and not lora_weight_set:
                                pipe.load_lora_weights(
                                    lora_weight,
                                    weight_name=weight_name,
                                    adapter_name=adapter_name,
                                    token=constants.HF_API_TOKEN
                                )
                                lora_weight_set = True
                                print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name})\n")
                            elif weight_name and adapter_name is None and not lora_weight_set:
                                pipe.load_lora_weights(
                                    lora_weight,
                                    weight_name=weight_name,
                                    token=constants.HF_API_TOKEN
                                )
                                lora_weight_set = True
                                print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name})\n")
                            elif not lora_weight_set:
                                pipe.load_lora_weights(
                                    lora_weight,
                                    token=constants.HF_API_TOKEN
                                )
                                lora_weight_set = True
                                print(f"\npipe.load_lora_weights({lora_weight})\n")
# Apply 'pipe' configurations if present
if 'pipe' in config:
pipe_config = config['pipe']
for method_name, params in pipe_config.items():
method = getattr(pipe, method_name, None)
if method:
print(f"Applying pipe method: {method_name} with params: {params}")
method(**params)
else:
print(f"Method {method_name} not found in pipe.")
                        if 'condition_type' in config:
                            condition_type = config['condition_type']
                            if condition_type in ("coloring", "deblurring", "fill", "depth", "canny", "subject"):
                                # The per-type pipe.enable_*() hooks are not wired up yet;
                                # condition_type is consumed below when building the Condition.
                                print(f"\nUsing condition type: {condition_type}.\n")
                            else:
                                print(f"Condition type {condition_type} not implemented.")
                else:
                    # No per-LoRA config found; load the weights directly
                    pipe.load_lora_weights(lora_weight, token=constants.HF_API_TOKEN)
# Set the random seed for reproducibility
generator = torch.Generator(device=device).manual_seed(seed)
        # Merge into any caller-supplied additional_parameters instead of replacing them
        if additional_parameters is None:
            additional_parameters = {}
        conditions = []
        if conditioned_image is not None:
            conditioned_image = crop_and_resize_image(conditioned_image, image_width, image_height)
            condition = Condition(condition_type, conditioned_image)
            conditions.append(condition)
            print(f"\nAdded conditioned image: {conditioned_image.size}\n")
            additional_parameters.update({
                "strength": strength,
                "image": conditioned_image,
            })
        else:
            print("\nNo conditioned image provided.")
        if neg_prompt is not None:
            true_cfg_scale = 1.1
            additional_parameters.update({
                "negative_prompt": neg_prompt,
                "true_cfg_scale": true_cfg_scale,
            })
# handle long prompts by splitting them
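        # CLIP's text encoder is capped at 77 tokens, so longer prompts are split
        # and the overflow is routed to prompt_2 (handled by FLUX's T5 encoder).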
if approximate_token_count(text) > 76:
prompt, prompt2 = split_prompt_precisely(text)
            prompt_parameters = {
                "prompt": prompt,
                "prompt_2": prompt2
            }
        else:
            prompt_parameters = {
                "prompt": text
            }
additional_parameters.update(prompt_parameters)
# Combine all parameters
        generate_params = {
            "height": image_height,
            "width": image_width,
            "guidance_scale": guidance_scale,
            "num_inference_steps": num_inference_steps,
            "generator": generator,
        }
if additional_parameters:
generate_params.update(additional_parameters)
generate_params = {k: v for k, v in generate_params.items() if v is not None}
print(f"generate_params: {generate_params}")
# Generate the image
result = pipe(**generate_params)
image = result.images[0]
# Clean up
del result
del conditions
del generator
# Delete the pipeline and clear cache
del pipe
torch.cuda.empty_cache()
torch.cuda.ipc_collect()
print(torch.cuda.memory_summary(device=None, abbreviated=False))
return image
def generate_ai_image_local(
map_option,
prompt_textbox_value,
neg_prompt_textbox_value,
model="black-forest-labs/FLUX.1-dev",
lora_weights=None,
conditioned_image=None,
height=512,
width=912,
num_inference_steps=30,
guidance_scale=3.5,
seed=777,
pipeline_name="FluxPipeline",
strength=0.75,
progress=gr.Progress(track_tqdm=True)
):
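    """
    Resolve the prompt/negative prompt for the chosen map option, fold in any
    LoRA-specific parameters and trigger words, generate the image via
    generate_image_lowmem, and return the path to a temporary PNG
    (or None if generation fails).
    """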
print(f"Generating image with lowmem")
try:
if map_option != "Prompt":
prompt = constants.PROMPTS[map_option]
negative_prompt = constants.NEGATIVE_PROMPTS.get(map_option, "")
else:
prompt = prompt_textbox_value
negative_prompt = neg_prompt_textbox_value or ""
#full_prompt = f"{prompt} {negative_prompt}"
additional_parameters = {}
if lora_weights:
for lora_weight in lora_weights:
lora_configs = constants.LORA_DETAILS.get(lora_weight, [])
for config in lora_configs:
if 'parameters' in config:
additional_parameters.update(config['parameters'])
elif 'trigger_words' in config:
trigger_words = get_trigger_words(lora_weight)
prompt = f"{trigger_words} {prompt}"
for key, value in additional_parameters.items():
if key in ['height', 'width', 'num_inference_steps', 'max_sequence_length']:
additional_parameters[key] = int(value)
elif key in ['guidance_scale','true_cfg_scale']:
additional_parameters[key] = float(value)
height = additional_parameters.pop('height', height)
width = additional_parameters.pop('width', width)
num_inference_steps = additional_parameters.pop('num_inference_steps', num_inference_steps)
guidance_scale = additional_parameters.pop('guidance_scale', guidance_scale)
print("Generating image with the following parameters:")
print(f"Model: {model}")
print(f"LoRA Weights: {lora_weights}")
print(f"Prompt: {prompt}")
print(f"Neg Prompt: {negative_prompt}")
print(f"Height: {height}")
print(f"Width: {width}")
print(f"Number of Inference Steps: {num_inference_steps}")
print(f"Guidance Scale: {guidance_scale}")
print(f"Seed: {seed}")
print(f"Additional Parameters: {additional_parameters}")
print(f"Conditioned Image: {conditioned_image}")
print(f"Conditioned Image Strength: {strength}")
print(f"pipeline: {pipeline_name}")
image = generate_image_lowmem(
text=prompt,
model_name=model,
neg_prompt=negative_prompt,
lora_weights=lora_weights,
conditioned_image=conditioned_image,
image_width=width,
image_height=height,
guidance_scale=guidance_scale,
num_inference_steps=num_inference_steps,
seed=seed,
pipeline_name=pipeline_name,
strength=strength,
additional_parameters=additional_parameters
)
with NamedTemporaryFile(delete=False, suffix=".png") as tmp:
image.save(tmp.name, format="PNG")
constants.temp_files.append(tmp.name)
print(f"Image saved to {tmp.name}")
#release_torch_resources()
gc.collect()
return tmp.name
except Exception as e:
print(f"Error generating AI image: {e}")
#release_torch_resources()
gc.collect()
return None
@spaces.GPU(duration=140, progress=gr.Progress(track_tqdm=True))
def generate_input_image_click(map_option, prompt_textbox_value, negative_prompt_textbox_value, model_textbox_value, randomize_seed=True, seed=None, use_conditioned_image=False, strength=0.5, image_format="16:9", scale_factor=(8/3), progress=gr.Progress(track_tqdm=True)):
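    """
    Gradio click handler: resolves the model/LoRA from the textbox, optionally
    conditions on the currently selected pre-rendered image (img2img), generates
    at the requested aspect ratio, upscales, and returns a temporary PNG path.
    """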
if randomize_seed:
seed = random.randint(0, constants.MAX_SEED)
# Get the model and LoRA weights
model, lora_weights = get_model_and_lora(model_textbox_value)
global current_prerendered_image
conditioned_image=None
if use_conditioned_image:
print(f"Conditioned path: {current_prerendered_image.value}.. converting to RGB\n")
        # The conditioned image must be a PIL image in RGB mode (RGBA is not supported); load it if we only have a file path
if isinstance(current_prerendered_image.value, str):
conditioned_image = open_image(current_prerendered_image.value).convert("RGB")
print(f"Conditioned Image: {conditioned_image.size}.. converted to RGB\n")
    # Parse the "W:H" image_format string into numeric width and height ratios
width_ratio, height_ratio = map(int, image_format.split(":"))
aspect_ratio = width_ratio / height_ratio
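    # 576 is the base dimension handed to convert_ratio_to_dimensions; the exact
    # width/height derivation lives in utils.misc.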
width, height = convert_ratio_to_dimensions(aspect_ratio, 576)
pipeline = "FluxPipeline"
if conditioned_image is not None:
pipeline = "FluxImg2ImgPipeline"
# Generate the AI image and get the image path
image_path = generate_ai_image_local(
map_option,
prompt_textbox_value,
negative_prompt_textbox_value,
model,
lora_weights,
conditioned_image,
strength=strength,
height=height,
width=width,
seed=seed,
pipeline_name=pipeline,
)
# Open the generated image
try:
image = Image.open(image_path).convert("RGBA")
except Exception as e:
print(f"Failed to open generated image: {e}")
return image_path # Return the original image path if opening fails
# Upscale the image
upscaled_image = upscale_image(image, scale_factor)
# Save the upscaled image to a temporary file
with NamedTemporaryFile(delete=False, suffix=".png") as tmp_upscaled:
upscaled_image.save(tmp_upscaled.name, format="PNG")
constants.temp_files.append(tmp_upscaled.name)
print(f"Upscaled image saved to {tmp_upscaled.name}")
# Return the path of the upscaled image
return tmp_upscaled.name
def update_prompt_visibility(map_option):
is_visible = (map_option == "Prompt")
return (
gr.update(visible=is_visible),
gr.update(visible=is_visible),
gr.update(visible=is_visible)
)
def update_prompt_notes(model_textbox_value):
return upd_prompt_notes(model_textbox_value)
def on_prerendered_gallery_selection(event_data: gr.SelectData):
global current_prerendered_image
selected_index = event_data.index
selected_image = constants.pre_rendered_maps_paths[selected_index]
print(f"Gallery Image Selected: {selected_image}\n")
current_prerendered_image.value = selected_image
return current_prerendered_image
def combine_images_with_lerp(input_image, output_image, alpha):
in_image = open_image(input_image)
out_image = open_image(output_image)
print(f"Combining images with alpha: {alpha}")
return lerp_imagemath(in_image, out_image, alpha)
def add_border(image, mask_width, mask_height, blank_color):
#install_torch()
bordered_image_output = Image.open(image).convert("RGBA")
margin_color = detect_color_format(blank_color)
print(f"Adding border to image with width: {mask_width}, height: {mask_height}, color: {margin_color}")
return shrink_and_paste_on_blank(bordered_image_output, mask_width, mask_height, margin_color)
@spaces.GPU()
def getVersions():
return versions_html()
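# Attribute read by the Hugging Face `spaces` ZeroGPU integration.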
generate_input_image_click.zerogpu = True
def main(debug=False):
title = "HexaGrid Creator"
#description = "Customizable Hexagon Grid Image Generator"
examples = [["assets//examples//hex_map_p1.png", 32, 1, 0, 0, 0, 0, 0, "#ede9ac44","#12165380", True]]
gr.set_static_paths(paths=["images/","images/images","images/prerendered","LUT/","fonts/"])
# Gradio Blocks Interface
with gr.Blocks(css_paths="style_20250128.css", title=title, theme='Surn/beeuty') as beeuty:
with gr.Row():
gr.Markdown("""
# HexaGrid Creator
## Transform Your Images into Mesmerizing Hexagon Grid Masterpieces! ⬢""", elem_classes="intro")
with gr.Row():
with gr.Accordion("Welcome to HexaGrid Creator, the ultimate tool for transforming your images into stunning hexagon grid artworks. Whether you're a tabletop game enthusiast, a digital artist, or someone who loves unique patterns, HexaGrid Creator has something for you.", open=False, elem_classes="intro"):
gr.Markdown ("""
## Drop an image into the Input Image and get started!
## What is HexaGrid Creator?
HexaGrid Creator is a web-based application that allows you to apply a hexagon grid overlay to any image. You can customize the size, color, and opacity of the hexagons, as well as the background and border colors. The result is a visually striking image that looks like it was made from hexagonal tiles!
### What Can You Do?
- **Generate Hexagon Grids:** Create beautiful hexagon grid overlays on any image with fully customizable parameters.
- **AI-Powered Image Generation:** Use advanced AI models to generate images based on your prompts and apply hexagon grids to them.
- **Color Exclusion:** Select and exclude specific colors from your hexagon grid for a cleaner and more refined look.
- **Interactive Customization:** Adjust hexagon size, border size, rotation, background color, and more in real-time.
- **Depth and 3D Model Generation:** Generate depth maps and 3D models from your images for enhanced visualization.
- **Image Filter [Look-Up Table (LUT)] Application:** Apply filters (LUTs) to your images for color grading and enhancement.
- **Pre-rendered Maps:** Access a library of pre-rendered hexagon maps for quick and easy customization.
- **Add Margins:** Add customizable margins around your images for a polished finish.
### Why You'll Love It
- **Fun and Easy to Use:** With an intuitive interface and real-time previews, creating hexagon grids has never been this fun!
- **Endless Creativity:** Unleash your creativity with endless customization options and see your images transform in unique ways.
- **Hexagon-Inspired Theme:** Enjoy a delightful yellow and purple theme inspired by hexagons! ⬢
- **Advanced AI Models:** Leverage advanced AI models and LoRA weights for high-quality image generation and customization.
### Get Started
1. **Upload or Generate an Image:** Start by uploading your own image or generate one using our AI-powered tool.
2. **Customize Your Grid:** Play around with the settings to create the perfect hexagon grid overlay.
3. **Download and Share:** Once you're happy with your creation, download it and share it with the world!
### Advanced Features
- **Generative AI Integration:** Utilize models like `black-forest-labs/FLUX.1-dev` and various LoRA weights for generating unique images.
- **Pre-rendered Maps:** Access a library of pre-rendered hexagon maps for quick and easy customization.
- **Image Filter [Look-Up Table (LUT)] Application:** Apply filters (LUTs) to your images for color grading and enhancement.
- **Depth and 3D Model Generation:** Create depth maps and 3D models from your images for enhanced visualization.
- **Add Margins:** Customize margins around your images for a polished finish.
Join the hive and start creating with HexaGrid Creator today!
""", elem_classes="intro")
with gr.Row():
from utils.image_utils import convert_to_rgba_png
# Existing code
with gr.Column(scale=2):
input_image = gr.Image(
label="Input Image",
type="filepath",
interactive=True,
elem_classes="centered solid imgcontainer",
key="imgInput",
image_mode=None,
format="PNG",
show_download_button=True,
)
# New code to convert input image to RGBA PNG
def on_input_image_change(image_path):
if image_path is None:
gr.Warning("Please upload an Input Image to get started.")
return None
img, img_path = convert_to_rgba_png(image_path)
return img_path
input_image.change(
fn=on_input_image_change,
inputs=[input_image],
outputs=[input_image], scroll_to_output=True,
)
with gr.Column():
with gr.Accordion("Hex Coloring and Exclusion", open = False):
with gr.Row():
with gr.Column():
color_picker = gr.ColorPicker(label="Pick a color to exclude",value="#505050")
with gr.Column():
filter_color = gr.Checkbox(label="Filter Excluded Colors from Sampling", value=False,)
exclude_color_button = gr.Button("Exclude Color", elem_id="exlude_color_button", elem_classes="solid")
color_display = gr.DataFrame(label="List of Excluded RGBA Colors", headers=["R", "G", "B", "A"], elem_id="excluded_colors", type="array", value=build_dataframe(excluded_color_list), interactive=True, elem_classes="solid centered")
selected_row = gr.Number(0, label="Selected Row", visible=False)
delete_button = gr.Button("Delete Row", elem_id="delete_exclusion_button", elem_classes="solid")
fill_hex = gr.Checkbox(label="Fill Hex with color from Image", value=True)
with gr.Accordion("Image Filters", open = False):
with gr.Row():
with gr.Column():
composite_color = gr.ColorPicker(label="Color", value="#ede9ac44")
with gr.Column():
composite_opacity = gr.Slider(label="Opacity %", minimum=0, maximum=100, value=50, interactive=True)
with gr.Row():
composite_button = gr.Button("Composite", elem_classes="solid")
with gr.Row():
with gr.Column():
lut_filename = gr.Textbox(
value="",
label="Look Up Table (LUT) File Name",
elem_id="lutFileName")
with gr.Column():
lut_file = gr.File(
value=None,
file_count="single",
file_types=[".cube"],
type="filepath",
label="LUT cube File")
with gr.Row():
lut_example_image = gr.Image(type="pil", label="Filter (LUT) Example Image", value=constants.default_lut_example_img)
with gr.Row():
with gr.Column():
gr.Markdown("""
### Included Filters (LUTs)
There are several included Filters:
Try them on the example image before applying to your Input Image.
""", elem_id="lut_markdown")
with gr.Column():
gr.Examples(elem_id="lut_examples",
examples=[[f] for f in constants.lut_files],
inputs=[lut_filename],
outputs=[lut_filename],
label="Select a Filter (LUT) file. Populate the LUT File Name field"
)
with gr.Row():
apply_lut_button = gr.Button("Apply Filter (LUT)", elem_classes="solid", elem_id="apply_lut_button")
lut_file.change(get_filename, inputs=[lut_file], outputs=[lut_filename])
lut_filename.change(show_lut, inputs=[lut_filename, lut_example_image], outputs=[lut_example_image])
apply_lut_button.click(
lambda lut_filename, input_image: gr.Warning("Please upload an Input Image to get started.") if input_image is None else apply_lut_to_image_path(lut_filename, input_image)[0],
inputs=[lut_filename, input_image],
outputs=[input_image],
scroll_to_output=True
)
with gr.Row():
with gr.Accordion("Generative AI", open = False):
with gr.Row():
with gr.Column():
model_options = gr.Dropdown(
label="Model Options",
choices=constants.MODELS + constants.LORA_WEIGHTS + ["Manual Entry"],
value="Cossale/Frames2-Flex.1",
elem_classes="solid"
)
model_textbox = gr.Textbox(
label="LORA/Model",
value="Cossale/Frames2-Flex.1",
elem_classes="solid",
elem_id="inference_model",
visible=False
)
# Update map_options to a Dropdown with choices from constants.PROMPTS keys
with gr.Row():
with gr.Column():
map_options = gr.Dropdown(
label="Map Options",
choices=list(constants.PROMPTS.keys()),
value="Alien Landscape",
elem_classes="solid",
scale=0
)
with gr.Column():
# Add Dropdown for sizing of Images, height and width based on selection. Options are 16x9, 16x10, 4x5, 1x1
# The values of height and width are based on common resolutions for each aspect ratio
# Default to 16x9, 912x512
image_size_ratio = gr.Dropdown(label="Image Size", choices=["16:9", "16:10", "4:5", "4:3", "2:1","3:2","1:1", "9:16", "10:16", "5:4", "3:4","1:2", "2:3"], value="16:9", elem_classes="solid", type="value", scale=0, interactive=True)
with gr.Column():
seed_slider = gr.Slider(
label="Seed",
minimum=0,
maximum=constants.MAX_SEED,
step=1,
value=0,
scale=0
)
randomize_seed = gr.Checkbox(label="Randomize seed", value=True, scale=0, interactive=True)
prompt_textbox = gr.Textbox(
label="Prompt",
visible=False,
elem_classes="solid",
value="top-down, (rectangular tabletop_map) alien planet map, Battletech_boardgame scifi world with forests, lakes, oceans, continents and snow at the top and bottom, (middle is dark, no_reflections, no_shadows), from directly above. From 100,000 feet looking straight down",
lines=4
)
negative_prompt_textbox = gr.Textbox(
label="Negative Prompt",
visible=False,
elem_classes="solid",
value="Earth, low quality, bad anatomy, blurry, cropped, worst quality, shadows, people, humans, reflections, shadows, realistic map of the Earth, isometric, text"
)
prompt_notes_label = gr.Label(
"You should use FRM$ as trigger words. @1.5 minutes",
elem_classes="solid centered small",
show_label=False,
visible=False
)
# Keep the change event to maintain functionality
map_options.change(
fn=update_prompt_visibility,
inputs=[map_options],
outputs=[prompt_textbox, negative_prompt_textbox, prompt_notes_label]
)
with gr.Row():
generate_input_image = gr.Button(
"Generate AI Image",
elem_id="generate_input_image",
elem_classes="solid"
)
with gr.Column(scale=2):
with gr.Accordion("Template Image Styles", open = False):
with gr.Row():
# Gallery from PRE_RENDERED_IMAGES GOES HERE
                        prerendered_image_gallery = gr.Gallery(label="Image Gallery", show_label=True, value=build_prerendered_images(constants.pre_rendered_maps_paths), elem_id="gallery", elem_classes="solid", type="filepath", columns=[3], rows=[3], preview=False, object_fit="contain", height="auto", format="png", allow_preview=False)
with gr.Row():
                        image_guidance_strength = gr.Slider(label="Image Guidance Strength (prompt <-> image)", minimum=0, maximum=1.0, value=0.5, step=0.01, interactive=True)
with gr.Column():
replace_input_image_button = gr.Button(
"Replace Input Image",
elem_id="prerendered_replace_input_image_button",
elem_classes="solid"
)
with gr.Column():
generate_input_image_from_gallery = gr.Button(
"Generate AI Image from Gallery",
elem_id="generate_input_image_from_gallery",
elem_classes="solid"
)
with gr.Accordion("Advanced Hexagon Settings", open = False):
with gr.Row():
start_x = gr.Number(label="Start X", value=0, minimum=-512, maximum= 512, precision=0)
start_y = gr.Number(label="Start Y", value=0, minimum=-512, maximum= 512, precision=0)
end_x = gr.Number(label="End X", value=0, minimum=-512, maximum= 512, precision=0)
end_y = gr.Number(label="End Y", value=0, minimum=-512, maximum= 512, precision=0)
with gr.Row():
x_spacing = gr.Number(label="Adjust Horizontal spacing", value=-1, minimum=-200, maximum=200, precision=1)
y_spacing = gr.Number(label="Adjust Vertical spacing", value=1, minimum=-200, maximum=200, precision=1)
with gr.Row():
rotation = gr.Slider(-90, 180, 0.0, 0.1, label="Hexagon Rotation (degree)")
add_hex_text = gr.Dropdown(label="Add Text to Hexagons", choices=[None, "Row-Column Coordinates", "Sequential Numbers", "Playing Cards Sequential", "Playing Cards Alternate Red and Black", "Custom List"], value=None)
with gr.Row():
custom_text_list = gr.TextArea(label="Custom Text List", value=constants.cards_alternating, visible=False,)
custom_text_color_list = gr.TextArea(label="Custom Text Color List", value=constants.card_colors_alternating, visible=False)
with gr.Row():
hex_text_info = gr.Markdown("""
### Text Color uses the Border Color and Border Opacity, unless you use a custom list.
### The Custom Text List and Custom Text Color List are comma separated lists.
### The custom color list is a comma separated list of hex colors.
#### Example: "A,2,3,4,5,6,7,8,9,10,J,Q,K", "red,#0000FF,#00FF00,red,#FFFF00,#00FFFF,#FF8000,#FF00FF,#FF0080,#FF8000,#FF0080,lightblue"
""", elem_id="hex_text_info", visible=False)
add_hex_text.change(
                    fn=lambda x: (
                        gr.update(visible=(x == "Custom List")),
                        gr.update(visible=(x == "Custom List")),
                        gr.update(visible=(x is not None))
                    ),
inputs=add_hex_text,
outputs=[custom_text_list, custom_text_color_list, hex_text_info]
)
with gr.Row():
hex_size = gr.Number(label="Hexagon Size", value=32, minimum=1, maximum=768)
border_size = gr.Slider(-5,25,value=0,step=1,label="Border Size")
with gr.Row():
background_color = gr.ColorPicker(label="Background Color", value="#000000", interactive=True)
background_opacity = gr.Slider(0,100,0,1,label="Background Opacity %")
border_color = gr.ColorPicker(label="Border Color", value="#7b7b7b", interactive=True)
border_opacity = gr.Slider(0,100,0,1,label="Border Opacity %")
with gr.Row():
hex_button = gr.Button("Generate Hex Grid!", elem_classes="solid", elem_id="btn-generate")
with gr.Row():
output_image = gr.Image(label="Hexagon Grid Image", image_mode = "RGBA", show_download_button=True, show_share_button=True,elem_classes="centered solid imgcontainer", format="PNG", type="filepath", key="ImgOutput")
overlay_image = gr.Image(label="Hexagon Overlay Image", image_mode = "RGBA", show_share_button=True, elem_classes="centered solid imgcontainer", format="PNG", type="filepath", key="ImgOverlay")
with gr.Row():
output_overlay_composite = gr.Slider(0,100,50,0.5, label="Interpolate Intensity")
output_blend_multiply_composite = gr.Slider(0,100,50,0.5, label="Overlay Intensity")
output_alpha_composite = gr.Slider(0,100,50,0.5, label="Alpha Composite Intensity")
with gr.Accordion("Add Margins (bleed)", open=False):
with gr.Row():
border_image_source = gr.Radio(label="Add Margins around which Image", choices=["Input Image", "Overlay Image"], value="Overlay Image")
with gr.Row():
mask_width = gr.Number(label="Margins Width", value=10, minimum=0, maximum=100, precision=0)
mask_height = gr.Number(label="Margins Height", value=10, minimum=0, maximum=100, precision=0)
with gr.Row():
margin_color = gr.ColorPicker(label="Margin Color", value="#333333FF", interactive=True)
margin_opacity = gr.Slider(0,100,95,0.5,label="Margin Opacity %")
with gr.Row():
add_border_button = gr.Button("Add Margins", elem_classes="solid", variant="secondary")
with gr.Row():
bordered_image_output = gr.Image(label="Image with Margins", image_mode="RGBA", show_download_button=True, show_share_button=True, elem_classes="centered solid imgcontainer", format="PNG", type="filepath", key="ImgBordered")
with gr.Accordion("Height Maps and 3D", open = False):
with gr.Row():
with gr.Column():
voxel_size_factor = gr.Slider(label="Voxel Size Factor", value=1.00, minimum=0.01, maximum=40.00, step=0.01)
with gr.Column():
depth_image_source = gr.Radio(label="Depth Image Source", choices=["Input Image", "Output Image", "Overlay Image","Image with Margins"], value="Input Image")
with gr.Row():
generate_depth_button = gr.Button("Generate Depth Map and 3D Model From Selected Image", elem_classes="solid", variant="secondary")
with gr.Row():
depth_map_output = gr.Image(label="Depth Map", image_mode="L", elem_classes="centered solid imgcontainer", format="PNG", type="filepath", key="ImgDepth")
model_output = gr.Model3D(label="3D Model", clear_color=[1.0, 1.0, 1.0, 0.25], key="Img3D", elem_classes="centered solid imgcontainer")
with gr.Row():
gr.Examples(examples=[
["assets//examples//hex_map_p1.png", False, True, -32,-31,80,80,-1.8,0,35,0,1,"#FFD0D0", 15],
["assets//examples//hex_map_p1_overlayed.png", False, False, -32,-31,80,80,-1.8,0,35,0,1,"#FFD0D0", 75],
["assets//examples//hex_flower_logo.png", False, True, -95,-95,100,100,-24,-2,190,30,2,"#FF8951", 50],
["assets//examples//hexed_fract_1.png", False, True, 0,0,0,0,0,0,10,0,0,"#000000", 5],
["assets//examples//tmpzt3mblvk.png", False, True, -20,10,0,0,-6,-2,35,30,1,"#ffffff", 0],
],
inputs=[input_image, filter_color, fill_hex, start_x, start_y, end_x, end_y, x_spacing, y_spacing, hex_size, rotation, border_size, border_color, border_opacity],
elem_id="examples")
with gr.Row():
gr.HTML(value=getVersions(), visible=True, elem_id="versions")
# with gr.Row():
# reinstall_torch = gr.Button("Reinstall Torch", elem_classes="solid small", variant="secondary")
# reinstall_cuda_toolkit = gr.Button("Install CUDA Toolkit", elem_classes="solid small", variant="secondary")
# reinitialize_cuda = gr.Button("Reinitialize CUDA", elem_classes="solid small", variant="secondary")
# torch_release = gr.Button("Release Torch Resources", elem_classes="solid small", variant="secondary")
# reinitialize_cuda.click(
# fn=initialize_cuda,
# inputs=[],
# outputs=[]
# )
# torch_release.click(
# fn=release_torch_resources,
# inputs=[],
# outputs=[]
# )
# reinstall_torch.click(
# fn=install_torch,
# inputs=[],
# outputs=[]
# )
# reinstall_cuda_toolkit.click(
# fn=install_cuda_toolkit,
# inputs=[],
# outputs=[]
# )
color_display.select(on_color_display_select,inputs=[color_display], outputs=[selected_row])
color_display.input(on_input,inputs=[color_display], outputs=[color_display, gr.State(excluded_color_list)])
delete_button.click(fn=delete_color, inputs=[selected_row, color_display], outputs=[color_display])
exclude_color_button.click(fn=add_color, inputs=[color_picker, gr.State(excluded_color_list)], outputs=[color_display, gr.State(excluded_color_list)])
hex_button.click(
fn=lambda hex_size, border_size, input_image, start_x, start_y, end_x, end_y, rotation, background_color, background_opacity, border_color, border_opacity, fill_hex, color_display, filter_color, x_spacing, y_spacing, add_hex_text, custom_text_list, custom_text_color_list:
gr.Warning("Please upload an Input Image to get started.") if input_image is None else hex_create(hex_size, border_size, input_image, start_x, start_y, end_x, end_y, rotation, background_color, background_opacity, border_color, border_opacity, fill_hex, color_display, filter_color, x_spacing, y_spacing, add_hex_text, custom_text_list, custom_text_color_list),
inputs=[hex_size, border_size, input_image, start_x, start_y, end_x, end_y, rotation, background_color, background_opacity, border_color, border_opacity, fill_hex, color_display, filter_color, x_spacing, y_spacing, add_hex_text, custom_text_list, custom_text_color_list],
outputs=[output_image, overlay_image],
scroll_to_output=True
)
generate_input_image.click(
fn=generate_input_image_click,
inputs=[map_options, prompt_textbox, negative_prompt_textbox, model_textbox, randomize_seed, seed_slider, gr.State(False), gr.State(0.5), image_size_ratio],
outputs=[input_image], scroll_to_output=True
)
generate_depth_button.click(
fn=generate_depth_button_click,
inputs=[depth_image_source, voxel_size_factor, input_image, output_image, overlay_image, bordered_image_output],
outputs=[depth_map_output, model_output], scroll_to_output=True
)
model_textbox.change(
fn=update_prompt_notes,
inputs=model_textbox,
outputs=prompt_notes_label,preprocess=False
)
model_options.change(
fn=lambda x: (gr.update(visible=(x == "Manual Entry")), gr.update(value=x) if x != "Manual Entry" else gr.update()),
inputs=model_options,
outputs=[model_textbox, model_textbox]
)
model_options.change(
fn=update_prompt_notes,
inputs=model_options,
outputs=prompt_notes_label
)
composite_button.click(
fn=lambda input_image, composite_color, composite_opacity: gr.Warning("Please upload an Input Image to get started.") if input_image is None else change_color(input_image, composite_color, composite_opacity),
inputs=[input_image, composite_color, composite_opacity],
outputs=[input_image]
)
        # Use the selected gallery image as the conditioning input for generate_input_image_click
generate_input_image_from_gallery.click(
fn=generate_input_image_click,
            inputs=[map_options, prompt_textbox, negative_prompt_textbox, model_textbox, randomize_seed, seed_slider, gr.State(True), image_guidance_strength, image_size_ratio],
outputs=[input_image], scroll_to_output=True
)
# Update the state variable with the prerendered image filepath when an image is selected
prerendered_image_gallery.select(
fn=on_prerendered_gallery_selection,
inputs=None,
outputs=[gr.State(current_prerendered_image)], # Update the state with the selected image
show_api=False
)
# replace input image with selected gallery image
replace_input_image_button.click(
lambda: current_prerendered_image.value,
inputs=None,
outputs=[input_image], scroll_to_output=True
)
output_overlay_composite.change(
fn=combine_images_with_lerp,
inputs=[input_image, output_image, output_overlay_composite],
outputs=[overlay_image], scroll_to_output=True
)
output_blend_multiply_composite.change(
fn=multiply_and_blend_images,
inputs=[input_image, output_image, output_blend_multiply_composite],
outputs=[overlay_image],
scroll_to_output=True
)
output_alpha_composite.change(
fn=alpha_composite_with_control,
inputs=[input_image, output_image, output_alpha_composite],
outputs=[overlay_image],
scroll_to_output=True
)
add_border_button.click(
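            # margin_opacity is a 0-100 slider; multiplying by 2.55 maps it onto a 0-255 alpha channel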
fn=lambda image_source, mask_w, mask_h, color, opacity, input_img, overlay_img: add_border(input_img if image_source == "Input Image" else overlay_img, mask_w, mask_h, update_color_opacity(detect_color_format(color), opacity * 2.55)),
inputs=[border_image_source, mask_width, mask_height, margin_color, margin_opacity, input_image, overlay_image],
outputs=[bordered_image_output],
scroll_to_output=True
)
    beeuty.queue(default_concurrency_limit=2, max_size=12, api_open=False)
beeuty.launch(allowed_paths=["assets","/","./assets","images","./images", "./images/prerendered"], favicon_path="./assets/favicon.ico", max_file_size="10mb")
if __name__ == "__main__":
logging.basicConfig(
format="[%(levelname)s] %(asctime)s %(message)s", level=logging.INFO
)
logging.info("Environment Variables: %s" % os.environ)
# if _get_output(["nvcc", "--version"]) is None:
# logging.info("Installing CUDA toolkit...")
# install_cuda_toolkit()
# else:
# logging.info("Detected CUDA: %s" % _get_output(["nvcc", "--version"]))
# logging.info("Installing CUDA extensions...")
# setup_runtime_env()
#main(os.getenv("DEBUG") == "1")
main()