import gradio as gr
from fetch import get_values
from dotenv import load_dotenv
load_dotenv()
import prodia
import requests
import random
from datetime import datetime
import os
prodia_key = os.getenv('PRODIA_X_KEY', None)
if prodia_key is None:
print("Please set PRODIA_X_KEY in .env, closing...")
exit()
client = prodia.Client(api_key=prodia_key)
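
# Handler for the "text2img" tab: generates `number` images via the Prodia
# txt2img endpoint and optionally saves each result under ./outputs/<date>/.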
def process_input_text2img(prompt, negative_prompt, steps, cfg_scale, number, seed, model, sampler, aspect_ratio, upscale, save=False):
    images = []
    for _ in range(number):
        result = client.txt2img(prompt=prompt, negative_prompt=negative_prompt, model=model, sampler=sampler,
                                steps=steps, cfg_scale=cfg_scale, seed=seed, aspect_ratio=aspect_ratio, upscale=upscale)
        images.append(result.url)
        if save:
            date = datetime.now()
            out_dir = f'./outputs/{date.year}-{date.month}-{date.day}'
            os.makedirs(out_dir, exist_ok=True)
            img_data = requests.get(result.url).content
            with open(f"{out_dir}/{random.randint(1, 10000000000000)}_{result.seed}.png", "wb") as f:
                f.write(img_data)
    return images
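
# Handler for the "img2img" tab: same flow as text2img, but starts from an
# init image URL and exposes denoising strength instead of aspect ratio.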
def process_input_img2img(init, prompt, negative_prompt, steps, cfg_scale, number, seed, model, sampler, ds, upscale, save=False):
    images = []
    for _ in range(number):
        result = client.img2img(imageUrl=init, prompt=prompt, negative_prompt=negative_prompt, model=model, sampler=sampler,
                                steps=steps, cfg_scale=cfg_scale, seed=seed, denoising_strength=ds, upscale=upscale)
        images.append(result.url)
        if save:
            date = datetime.now()
            out_dir = f'./outputs/{date.year}-{date.month}-{date.day}'
            os.makedirs(out_dir, exist_ok=True)
            img_data = requests.get(result.url).content
            with open(f"{out_dir}/{random.randint(1, 10000000000000)}_{result.seed}.png", "wb") as f:
                f.write(img_data)
    return images
"""
def process_input_control(init, prompt, negative_prompt, steps, cfg_scale, number, seed, model, control_model, sampler):
images = []
for image in range(number):
result = client.controlnet(imageUrl=init, prompt=prompt, negative_prompt=negative_prompt, model=model, sampler=sampler,
steps=steps, cfg_scale=cfg_scale, seed=seed, controlnet_model=control_model)
images.append(result.url)
return images
"""
theme = gr.themes.Base(
    primary_hue=gr.themes.Color(c100="#dbeafe", c200="#bfdbfe", c300="#93c5fd", c400="#60a5fa", c50="#eff6ff", c500="#3b82f6", c600="#2563eb", c700="#fb3657", c800="#1e40af", c900="#1e3a8a", c950="#1d3660"),
    neutral_hue=gr.themes.Color(c100="#e0e7ff", c200="#c7d2fe", c300="#3c4367", c400="#b5b5b5", c50="#eef2ff", c500="#757575", c600="#221935", c700="#09001b", c800="#0f0e27", c900="#0f0e27", c950="#09001b"),
).set(
    block_background_fill='*background_fill_secondary'
)

with gr.Blocks(theme=theme) as demo:
    gr.Markdown("""
    # Prodia by @xAbdoAT
    A simple web GUI for using the Prodia API easily, built with Python, Gradio, and prodiapy.
    """)
with gr.Tab(label="text2img"):
with gr.Row():
with gr.Column():
prompt = gr.Textbox(label="Prompt", lines=2, placeholder="puppies in a cloud, 4k")
negative = gr.Textbox(label="Negative Prompt", lines=3, placeholder="Add words you don't want to show up in your art...")
with gr.Row():
steps = gr.Slider(label="Steps", value=30, step=1, maximum=50, minimum=1, interactive=True)
cfg = gr.Slider(label="CFG Scale", maximum=20, minimum=1, value=7, interactive=True)
with gr.Row():
num = gr.Slider(label="Number of images", value=1, step=1, minimum=1, interactive=True)
seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=4294967295, interactive=True)
with gr.Row():
model = gr.Dropdown(label="Model", choices=get_values()[0], value="v1-5-pruned-emaonly.ckpt [81761151]", interactive=True)
sampler = gr.Dropdown(label="Sampler", choices=get_values()[1], value="DDIM", interactive=True)
with gr.Row():
ar = gr.Radio(label="Aspect Ratio", choices=["square", "portrait", "landscape"], value="square", interactive=True)
with gr.Column():
upscale = gr.Checkbox(label="upscale", interactive=True)
with gr.Row():
run_btn = gr.Button("Run", variant="primary")
with gr.Column():
result_image = gr.Gallery(label="Result Image(s)")
run_btn.click(
process_input_text2img,
inputs=[
prompt,
negative,
steps,
cfg,
num,
seed,
model,
sampler,
ar,
upscale
],
outputs=[result_image],
)
with gr.Tab(label="img2img"):
with gr.Row():
with gr.Column():
prompt = gr.Textbox(label="Prompt", lines=2, placeholder="puppies in a cloud, 4k")
with gr.Row():
negative = gr.Textbox(label="Negative Prompt", lines=3, placeholder="Add words you don't want to show up in your art...")
init_image = gr.Textbox(label="Init Image Url", lines=2, placeholder="https://cdn.openai.com/API/images/guides/image_generation_simple.webp")
with gr.Row():
steps = gr.Slider(label="Steps", value=30, step=1, maximum=50, minimum=1, interactive=True)
cfg = gr.Slider(label="CFG Scale", maximum=20, minimum=1, value=7, interactive=True)
with gr.Row():
num = gr.Slider(label="Number of images", value=1, step=1, minimum=1, interactive=True)
seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=4294967295, interactive=True)
with gr.Row():
model = gr.Dropdown(label="Model", choices=get_values()[0], value="v1-5-pruned-emaonly.ckpt [81761151]", interactive=True)
sampler = gr.Dropdown(label="Sampler", choices=get_values()[1], value="DDIM", interactive=True)
with gr.Row():
ds = gr.Slider(label="Denoising strength", maximum=0.9, minimum=0.1, value=0.5, interactive=True)
with gr.Column():
upscale = gr.Checkbox(label="upscale", interactive=True)
with gr.Row():
run_btn = gr.Button("Run", variant="primary")
with gr.Column():
result_image = gr.Gallery(label="Result Image(s)")
run_btn.click(
process_input_img2img,
inputs=[
init_image,
prompt,
negative,
steps,
cfg,
num,
seed,
model,
sampler,
ds,
upscale
],
outputs=[result_image],
)

if __name__ == "__main__":
    demo.launch(show_api=True)