import gradio as gr
import requests
import time
import json
import base64
import os
from io import BytesIO
import html
import re
from PIL import Image # Add this import
class Prodia:
    """Minimal HTTP client for the Prodia Stable Diffusion REST API."""

    def __init__(self, api_key, base=None):
        # Fall back to the public v1 endpoint unless a custom base URL is given.
        self.base = base or "https://api.prodia.com/v1"
        self.headers = {"X-Prodia-Key": api_key}

    def generate(self, params):
        """Submit a txt2img job; returns the job descriptor dict."""
        return self._post(f"{self.base}/sd/generate", params).json()

    def transform(self, params):
        """Submit an img2img job; returns the job descriptor dict."""
        return self._post(f"{self.base}/sd/transform", params).json()

    def controlnet(self, params):
        """Submit a ControlNet job; returns the job descriptor dict."""
        return self._post(f"{self.base}/sd/controlnet", params).json()

    def get_job(self, job_id):
        """Fetch the current state of a job by id."""
        return self._get(f"{self.base}/job/{job_id}").json()

    def wait(self, job):
        """Poll a job every 250 ms until it succeeds or fails; return final state."""
        current = job
        while current['status'] not in ['succeeded', 'failed']:
            time.sleep(0.25)
            current = self.get_job(job['job'])
        return current

    def list_models(self):
        """Return available checkpoint names."""
        return self._get(f"{self.base}/sd/models").json()

    def list_samplers(self):
        """Return available sampler names."""
        return self._get(f"{self.base}/sd/samplers").json()

    def list_loras(self):
        """Return available LoRA names."""
        return self._get(f"{self.base}/sd/loras").json()

    def _post(self, url, params):
        # JSON-encode the payload explicitly so the API-key header is preserved.
        headers = {**self.headers, "Content-Type": "application/json"}
        response = requests.post(url, headers=headers, data=json.dumps(params))
        if response.status_code != 200:
            raise Exception(f"Bad Prodia Response: {response.status_code}")
        return response

    def _get(self, url):
        response = requests.get(url, headers=self.headers)
        if response.status_code != 200:
            raise Exception(f"Bad Prodia Response: {response.status_code}")
        return response
def image_to_base64(image):
    """Serialize a PIL image to a base64-encoded PNG string."""
    buffer = BytesIO()
    # Encode the image as PNG bytes entirely in memory.
    image.save(buffer, format="PNG")
    encoded = base64.b64encode(buffer.getvalue())
    return encoded.decode('utf-8')
def remove_id_and_ext(text):
    """Strip a trailing "[hash]" tag and a known model-file extension.

    e.g. "name.safetensors [0a8a2e61]" -> "name".
    """
    # Drop the trailing "[...]" id plus any whitespace left behind by it.
    text = re.sub(r'\[.*\]$', '', text).strip()
    # Bug fix: the old code compared a 12-char slice against "ckpt", so the
    # ckpt branch never fired; use endswith and slice by exact length instead.
    if text.endswith(".safetensors"):
        text = text[:-len(".safetensors")]
    elif text.endswith(".ckpt"):
        text = text[:-len(".ckpt")]
    return text
def get_data(text):
    """Parse A1111-style generation parameters out of a PNG info string.

    Returns a dict with keys prompt, negative_prompt, steps, seed, sampler,
    model, cfg_scale, size, w and h; any field missing from `text` is None.
    """
    patterns = {
        'prompt': r'(.*)',
        'negative_prompt': r'Negative prompt: (.*)',
        'steps': r'Steps: (\d+),',
        'seed': r'Seed: (\d+),',
        'sampler': r'Sampler:\s*([^\s,]+(?:\s+[^\s,]+)*)',
        'model': r'Model:\s*([^\s,]+)',
        'cfg_scale': r'CFG scale:\s*([\d\.]+)',
        'size': r'Size:\s*([0-9]+x[0-9]+)',
    }
    results = {}
    for key, pattern in patterns.items():
        match = re.search(pattern, text)
        results[key] = match.group(1) if match else None
    # Split "WxH" into separate width/height strings when present.
    size = results['size']
    if size is not None:
        results['w'], results['h'] = size.split("x")
    else:
        results['w'] = None
        results['h'] = None
    return results
def send_to_txt2img(image):
result = {tabs: gr.update(selected="t2i")}
try:
text = image.info['parameters']
data = get_data(text)
result[prompt] = gr.update(value=data['prompt'])
result[negative_prompt] = gr.update(value=data['negative_prompt']) if data['negative_prompt'] is not None else gr.update()
result[steps] = gr.update(value=int(data['steps'])) if data['steps'] is not None else gr.update()
result[seed] = gr.update(value=int(data['seed'])) if data['seed'] is not None else gr.update()
result[cfg_scale] = gr.update(value=float(data['cfg_scale'])) if data['cfg_scale'] is not None else gr.update()
result[width] = gr.update(value=int(data['w'])) if data['w'] is not None else gr.update()
result[height] = gr.update(value=int(data['h'])) if data['h'] is not None else gr.update()
result[sampler] = gr.update(value=data['sampler']) if data['sampler'] is not None else gr.update()
if model in model_names:
result[model] = gr.update(value=model_names[model])
else:
result[model] = gr.update()
return result
except Exception as e:
print(e)
return result
# Module-level API client plus a map from display name -> full model filename.
prodia_client = Prodia(api_key=os.getenv("PRODIA_API_KEY"))
model_list = prodia_client.list_models()
model_names = {remove_id_and_ext(full_name): full_name for full_name in model_list}
def add_watermark(image_path, watermark_path, position, scale=1.5):
    """Paste a scaled watermark onto an image and return the composited image.

    Args:
        image_path: path of the base image file.
        watermark_path: path of the watermark image (converted to RGBA).
        position: a corner name ('bottom_right', 'bottom_left', 'top_right',
            'top_left') or an explicit (x, y) pixel tuple.
        scale: size multiplier applied to the watermark before pasting.
    """
    base_image = Image.open(image_path)
    watermark = Image.open(watermark_path).convert("RGBA")
    # Resize the watermark before placement.
    watermark = watermark.resize(
        (int(watermark.width * scale), int(watermark.height * scale)),
        Image.LANCZOS,
    )
    # Bug fix: previously only 'bottom_right' was resolved; any other name
    # left `position` as a string and made paste() fail. Resolve all corner
    # names, defaulting unknown names to the bottom-right corner.
    corners = {
        'bottom_right': (base_image.width - watermark.width, base_image.height - watermark.height),
        'bottom_left': (0, base_image.height - watermark.height),
        'top_right': (base_image.width - watermark.width, 0),
        'top_left': (0, 0),
    }
    if isinstance(position, str):
        position = corners.get(position, corners['bottom_right'])
    # Use the watermark's own alpha channel as the paste mask.
    base_image.paste(watermark, position, watermark)
    return base_image
def txt2img(prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height, seed, lora, batch_size, batch_count):
    """Generate batch_count * batch_size images via the Prodia txt2img API.

    Each result is downloaded, watermarked in place, and its local file path
    appended to the returned list.

    Raises:
        KeyError: if a finished job carries no 'imageUrl'.
        requests.HTTPError: if the image download returns a non-200 status.
    """
    images = []
    # The two original nested loops did identical work; a single loop over the
    # total count preserves behavior.
    for _ in range(batch_count * batch_size):
        result = prodia_client.generate({
            "prompt": prompt,
            "negative_prompt": negative_prompt,
            "model": model,
            "steps": steps,
            "sampler": sampler,
            "cfg_scale": cfg_scale,
            "upscale": True,
            "width": width,
            "height": height,
            "seed": seed,
            "lora": lora
        })
        job = prodia_client.wait(result)
        if "imageUrl" not in job:
            raise KeyError(f"'imageUrl' not found in job response: {job}")
        image_url = job["imageUrl"]
        # Download the generated image; fail loudly instead of silently
        # writing an error page to disk on a bad response.
        response = requests.get(image_url)
        response.raise_for_status()
        image_path = f"generated_image_{len(images)}.png"
        with open(image_path, "wb") as f:
            f.write(response.content)
        # Watermark the saved file in place.
        watermarked_image = add_watermark(image_path, "logo.webp", "bottom_right")
        watermarked_image.save(image_path)
        images.append(image_path)
    return images
def img2img(input_image, denoising, prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height, seed, lora, batch_size, batch_count):
    """Transform an input image batch_count * batch_size times via Prodia img2img.

    Each result is downloaded, watermarked in place, and its local file path
    appended to the returned list.

    Raises:
        KeyError: if a finished job carries no 'imageUrl'.
        requests.HTTPError: if the image download returns a non-200 status.
    """
    images = []
    # The two original nested loops did identical work; a single loop over the
    # total count preserves behavior.
    for _ in range(batch_count * batch_size):
        result = prodia_client.transform({
            "imageData": image_to_base64(input_image),
            "denoising_strength": denoising,
            "prompt": prompt,
            "negative_prompt": negative_prompt,
            "model": model,
            "steps": steps,
            "sampler": sampler,
            "upscale": True,
            "cfg_scale": cfg_scale,
            "width": width,
            "height": height,
            "seed": seed,
            "lora": lora
        })
        job = prodia_client.wait(result)
        if "imageUrl" not in job:
            raise KeyError(f"'imageUrl' not found in job response: {job}")
        image_url = job["imageUrl"]
        # Download the transformed image; fail loudly instead of silently
        # writing an error page to disk on a bad response.
        response = requests.get(image_url)
        response.raise_for_status()
        image_path = f"transformed_image_{len(images)}.png"
        with open(image_path, "wb") as f:
            f.write(response.content)
        # Watermark the saved file in place.
        watermarked_image = add_watermark(image_path, "logo.webp", "bottom_right")
        watermarked_image.save(image_path)
        images.append(image_path)
    return images
# Extra CSS: make the Generate button fill its column's height.
css = """
#generate {
    height: 100%;
}
"""
loras = prodia_client.list_loras()
# Prefer the detail-enhancing LoRA when available, else the first listed one.
# Bug fix: guard against an empty LoRA list, which made loras[0] raise
# IndexError at import time.
if "more_details_v10" in loras:
    default_lora = "more_details_v10"
elif loras:
    default_lora = loras[0]
else:
    default_lora = None
# UI layout: two generation tabs (txt2img / img2img) sharing the model and
# LoRA dropdowns declared at the top of the Blocks context.
with gr.Blocks(css=css, theme=gr.themes.Monochrome()) as demo:  # Apply the Monochrome theme
    with gr.Row():
        with gr.Column(scale=6):
            # Checkpoint and LoRA selectors shared by both tabs.
            model = gr.Dropdown(interactive=True, value="amIReal_V41.safetensors [0a8a2e61]", show_label=True, label="Stable Diffusion Checkpoint", choices=prodia_client.list_models())
            lora = gr.Dropdown(interactive=True, value=default_lora, show_label=True, label="LoRA", choices=loras)  # Set default LoRA
    with gr.Tabs() as tabs:
        # ----------------------------- txt2img -----------------------------
        with gr.Tab("txt2img", id='t2i'):
            with gr.Row():
                with gr.Column(scale=6, min_width=600):
                    prompt = gr.Textbox("space warrior, beautiful, female, ultrarealistic, soft lighting, 8k", placeholder="Prompt", show_label=False, lines=3)
                    negative_prompt = gr.Textbox(placeholder="Negative Prompt", show_label=False, lines=3, value="(3d, render, cgi, doll, painting, fake, cartoon, 3d modeling:1.4), (worst quality, low quality:1.4), child, deformed, malformed, bad teeth, bad hands, bad fingers, bad eyes, long body, blurry, duplicated, cloned, duplicate body parts, disfigured, extra limbs, fused fingers, extra fingers, twisted, distorted, malformed hands, mutated hands, mutated fingers, conjoined, missing limbs, bad anatomy, bad proportions, logo, watermark, text, copyright, signature, lowres, mutated, mutilated, artifacts, gross, ugly, tattoo, weird lettering, weird drawing, easynegative, FastNegativeV2")
                with gr.Column():
                    text_button = gr.Button("Generate", variant='primary', elem_id="generate")
            with gr.Row():
                with gr.Column(scale=3):
                    with gr.Tab("Generation"):
                        with gr.Row():
                            with gr.Column(scale=1):
                                sampler = gr.Dropdown(value="DPM++ 2M Karras", show_label=True, label="Sampling Method", choices=prodia_client.list_samplers())
                            with gr.Column(scale=1):
                                steps = gr.Slider(label="Sampling Steps", minimum=1, maximum=25, value=20, step=1)
                        with gr.Row():
                            with gr.Column(scale=1):
                                width = gr.Slider(label="Width", maximum=1024, value=640, step=8)
                                height = gr.Slider(label="Height", maximum=1024, value=1024, step=8)
                            with gr.Column(scale=1):
                                batch_size = gr.Slider(label="Batch Size", minimum=1, maximum=4, value=1, step=1)  # Add batch size slider
                                batch_count = gr.Slider(label="Batch Count", minimum=1, maximum=4, value=1, step=1)  # Add batch count slider
                        cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, value=7, step=1)
                        seed = gr.Number(label="Seed", value=-1)
                with gr.Column(scale=2):
                    image_output = gr.Gallery(label="Generated Images")  # Use Gallery to display multiple images
            # Wire the Generate button to txt2img with every control value.
            text_button.click(txt2img, inputs=[prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height,
                                               seed, lora, batch_size, batch_count], outputs=image_output, concurrency_limit=64)  # Add batch size and count inputs
        # ----------------------------- img2img -----------------------------
        with gr.Tab("img2img", id='i2i'):
            with gr.Row():
                with gr.Column(scale=6, min_width=600):
                    i2i_prompt = gr.Textbox("space warrior, beautiful, female, ultrarealistic, soft lighting, 8k", placeholder="Prompt", show_label=False, lines=3)
                    i2i_negative_prompt = gr.Textbox(placeholder="Negative Prompt", show_label=False, lines=3, value="(3d, render, cgi, doll, painting, fake, cartoon, 3d modeling:1.4), (worst quality, low quality:1.4), child, deformed, malformed, bad teeth, bad hands, bad fingers, bad eyes, long body, blurry, duplicated, cloned, duplicate body parts, disfigured, extra limbs, fused fingers, extra fingers, twisted, distorted, malformed hands, mutated hands, mutated fingers, conjoined, missing limbs, bad anatomy, bad proportions, logo, watermark, text, copyright, signature, lowres, mutated, mutilated, artifacts, gross, ugly, tattoo, weird lettering, weird drawing, easynegative, FastNegativeV2")
                with gr.Column():
                    i2i_text_button = gr.Button("Generate", variant='primary', elem_id="generate")
            with gr.Row():
                with gr.Column(scale=3):
                    with gr.Tab("Generation"):
                        # Source image for the transformation.
                        i2i_image_input = gr.Image(type="pil")
                        with gr.Row():
                            with gr.Column(scale=1):
                                i2i_sampler = gr.Dropdown(value="Euler a", show_label=True, label="Sampling Method", choices=prodia_client.list_samplers())
                            with gr.Column(scale=1):
                                i2i_steps = gr.Slider(label="Sampling Steps", minimum=1, maximum=25, value=20, step=1)
                        with gr.Row():
                            with gr.Column(scale=1):
                                i2i_width = gr.Slider(label="Width", maximum=1024, value=640, step=8)
                                i2i_height = gr.Slider(label="Height", maximum=1024, value=1024, step=8)
                            with gr.Column(scale=1):
                                i2i_batch_size = gr.Slider(label="Batch Size", minimum=1, maximum=4, value=1, step=1)  # Add batch size slider
                                i2i_batch_count = gr.Slider(label="Batch Count", minimum=1, maximum=4, value=1, step=1)  # Add batch count slider
                        i2i_cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, value=7, step=1)
                        i2i_denoising = gr.Slider(label="Denoising Strength", minimum=0, maximum=1, value=0.7, step=0.1)
                        i2i_seed = gr.Number(label="Seed", value=-1)
                with gr.Column(scale=2):
                    i2i_image_output = gr.Gallery(label="Transformed Images")  # Use Gallery to display multiple images
            # Wire the Generate button to img2img; note it reuses the shared
            # `model` and `lora` dropdowns from the top row.
            i2i_text_button.click(img2img, inputs=[i2i_image_input, i2i_denoising, i2i_prompt, i2i_negative_prompt,
                                                   model, i2i_steps, i2i_sampler, i2i_cfg_scale, i2i_width, i2i_height,
                                                   i2i_seed, lora, i2i_batch_size, i2i_batch_count], outputs=i2i_image_output, concurrency_limit=64)  # Add batch size and count inputs
with gr.Tab("PNG Info"):
def plaintext_to_html(text, classname=None):
content = "
\n".join(html.escape(x) for x in text.split('\n'))
return f"
{content}
" if classname else f"{content}
" def get_exif_data(image): items = image.info info = '' for key, text in items.items(): info += f"""{plaintext_to_html(str(key))}
{plaintext_to_html(str(text))}
{message}