import gradio as gr
from all_models import models
from externalmod import gr_Interface_load, save_image, randomize_seed
import asyncio
import os
from threading import RLock
from datetime import datetime

preSetPrompt = "cute tall slender athletic 20+ caucasian woman. gorgeous face. perky tits. sensual expression. lifting shirt. photorealistic. cinematic. f1.4"
# preSetPrompt = "cute tall slender athletic 20+ nude caucasian woman. gorgeous face. perky tits. gaping outie pussy. pussy juice. sly smile. explicit pose. artistic. photorealistic. cinematic. f1.4" | |
# H. R. Giger prompt: | |
# preSetPrompt = "a tall slender athletic caucasian nude 18+ female cyborg. gorgeous face. perky tits. wet skin. sensual expression. she is entangled in rusty chains, rusty barbed wire and electric cables. old dark dusty decaying spaceship designed by h.r. giger. rusty metal dildos. wet tubes and wet plastic hoses. dark, gloomy teal cinematic light. photorealistic." | |
negPreSetPrompt = "[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry, text, fuzziness"

lock = RLock()
HF_TOKEN = os.environ.get("HF_TOKEN")  # Only needed when private or gated models are used.

def get_current_time():
    now = datetime.now()
    return now.strftime("%y-%m-%d %H:%M:%S")

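# Load every model through the HF inference wrapper; if a model fails to load,
# substitute a dummy text-to-image Interface so the UI still builds.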
def load_fn(models):
    global models_load
    models_load = {}
    for model in models:
        if model not in models_load:
            try:
                m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
            except Exception as error:
                print(error)
                m = gr.Interface(lambda: None, ['text'], ['image'])
            models_load.update({model: m})

load_fn(models)

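# UI/inference limits: number of model slots, image cap, per-request timeout (seconds),
# default model selection, and the maximum seed value.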
num_models = 6
max_images = 6
inference_timeout = 400
default_models = models[:num_models]
MAX_SEED = 2**32 - 1

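# Pad the current selection to exactly num_models entries ('NA' marks an empty slot)
# and show/hide the per-model image boxes to match.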
def extend_choices(choices):
    return choices[:num_models] + (num_models - len(choices[:num_models])) * ['NA']

def update_imgbox(choices):
    choices_plus = extend_choices(choices[:num_models])
    return [gr.Image(None, label=m, visible=(m != 'NA')) for m in choices_plus]

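# Pick num_models models at random (with replacement) for the "Randomize Models" button.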
def random_choices():
    import random
    random.seed()
    return random.choices(models, k=num_models)

# https://huggingface.co/docs/api-inference/detailed_parameters
# https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
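# Run a single model asynchronously: build kwargs from the non-zero settings, execute the
# blocking call in a worker thread with a timeout, then save the resulting image to disk.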
async def infer(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1, timeout=inference_timeout):
    kwargs = {}
    if height > 0: kwargs["height"] = height
    if width > 0: kwargs["width"] = width
    if steps > 0: kwargs["num_inference_steps"] = steps
    if cfg > 0: kwargs["guidance_scale"] = cfg
    if seed == -1:
        theSeed = randomize_seed()
        kwargs["seed"] = theSeed
    else:
        kwargs["seed"] = seed
        theSeed = seed

    task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn, prompt=prompt,
                                                 negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
    await asyncio.sleep(0)
    try:
        result = await asyncio.wait_for(task, timeout=timeout)
    except asyncio.TimeoutError as e:
        print(e)
        print(f"Task timed out: {model_str}")
        if not task.done(): task.cancel()
        result = None
        raise Exception(f"Task timed out: {model_str}") from e
    except Exception as e:
        print(e)
        if not task.done(): task.cancel()
        result = None
        raise Exception(f"Task failed: {model_str}") from e
    if task.done() and result is not None and not isinstance(result, tuple):
        with lock:
            # png_path = "img.png"
            # png_path = get_current_time() + "_" + model_str.replace("/", "_") + ".png"
            # png_path = model_str.replace("/", "_") + " - " + prompt + " - " + get_current_time() + ".png"
            png_path = model_str.replace("/", "_") + " - " + get_current_time() + "_" + str(theSeed) + ".png"
            image = save_image(result, png_path, model_str, prompt, nprompt, height, width, steps, cfg, seed)
        return image
    return None

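# Synchronous wrapper used by the Gradio event handlers: runs infer() on a fresh event loop
# and surfaces failures to the UI as gr.Error.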
def gen_fn(model_str, prompt, nprompt="", height=0, width=0, steps=0, cfg=0, seed=-1):
    loop = asyncio.new_event_loop()
    try:
        result = loop.run_until_complete(infer(model_str, prompt, nprompt,
                                               height, width, steps, cfg, seed, inference_timeout))
    except (Exception, asyncio.CancelledError) as e:
        print(e)
        print(f"Task aborted: {model_str}")
        result = None
        raise gr.Error(f"Task aborted: {model_str}, Error: {e}")
    finally:
        loop.close()
    return result

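# Prepend a newly generated image to the shared gallery state.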
def add_gallery(image, model_str, gallery):
    if gallery is None: gallery = []
    with lock:
        if image is not None: gallery.insert(0, (image, model_str))
    return gallery

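# JS snippet that forces the dark theme by reloading with ?__theme=dark.
# Note: it is not wired into gr.Blocks(js=...) in this file.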
js_func = """ | |
function refresh() { | |
const url = new URL(window.location); | |
if (url.searchParams.get('__theme') !== 'dark') { | |
url.searchParams.set('__theme', 'dark'); | |
window.location.href = url.href; | |
} | |
} | |
""" | |
js_AutoSave = """
console.log("Yo");
var img1 = document.querySelector("div#component-355 .svelte-1kpcxni button.svelte-1kpcxni .svelte-1kpcxni img"),
    observer = new MutationObserver((changes) => {
        changes.forEach(change => {
            if (change.attributeName.includes('src')) {
                console.log(img1.src);
                document.querySelector("div#component-355 .svelte-1kpcxni .svelte-sr71km a.svelte-1s8vnbx button").click();
            }
        });
    });
observer.observe(img1, {attributes: true});
"""

CSS = """
.gradio-container { max-width: 1200px; margin: 0 auto; background: linear-gradient(to bottom, #1a1a1a, #2d2d2d); }
.output {
    width: 112px;
    height: 112px;
    border-radius: 10px;
    box-shadow: 0 4px 8px rgba(0,0,0,0.2);
    transition: transform 0.2s;
}
.output:hover {
    transform: scale(1.05);
}
.gallery {
    min-width: 512px;
    min-height: 512px;
    max-height: 512px;
    border-radius: 15px;
    box-shadow: 0 6px 12px rgba(0,0,0,0.3);
}
.guide { text-align: center; color: #e0e0e0; }
.primary-btn {
    background: linear-gradient(45deg, #4a90e2, #357abd);
    border-radius: 8px;
    transition: all 0.3s ease;
}
.primary-btn:hover {
    transform: translateY(-2px);
    box-shadow: 0 5px 15px rgba(74,144,226,0.3);
}
"""

with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', fill_width=True, css=CSS) as demo:
    gr.HTML("""<a href="https://visitorbadge.io/status?path=https%3A%2F%2Fgunship999-SexyImages.hf.space">
    <img src="https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Fgunship999-SexyImages.hf.space&countColor=%23263759" />
    </a>""")
    with gr.Column(scale=2):
        # Model selection
        with gr.Accordion("Model Selection", open=True):
            model_choice = gr.CheckboxGroup(
                models,
                label=f'Choose up to {int(num_models)} models',
                value=default_models,
                interactive=True
            )
        with gr.Group():
            txt_input = gr.Textbox(
                label='Your prompt:',
                value=preSetPrompt,
                lines=3,
                autofocus=True
            )
            neg_input = gr.Textbox(
                label='Negative prompt:',
                value=negPreSetPrompt,
                lines=1
            )
        with gr.Accordion("Advanced Settings", open=False):
            with gr.Row():
                width = gr.Slider(label="Width", maximum=1216, step=32, value=0)
                height = gr.Slider(label="Height", maximum=1216, step=32, value=0)
            with gr.Row():
                steps = gr.Slider(label="Steps", maximum=100, step=1, value=0)
                cfg = gr.Slider(label="Guidance Scale", maximum=30.0, step=0.1, value=0)
                seed = gr.Slider(label="Seed", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
                seed_rand = gr.Button("🎲", size="sm", elem_classes="primary-btn")
                seed_rand.click(randomize_seed, None, [seed], queue=False)
        with gr.Row():
            gen_button = gr.Button(
                f'Generate {int(num_models)} Images',
                variant='primary',
                scale=3,
                elem_classes="primary-btn"
            )
            random_button = gr.Button(
                'Randomize Models',
                variant='secondary',
                scale=1
            )
    with gr.Column(scale=1):
        with gr.Group():
            with gr.Row():
                output = [gr.Image(label=m, show_download_button=True,
                                   elem_classes="output",
                                   interactive=False, width=112, height=112,
                                   show_share_button=False, format="png",
                                   visible=True) for m in default_models]
                current_models = [gr.Textbox(m, visible=False)
                                  for m in default_models]
    with gr.Column(scale=2):
        gallery = gr.Gallery(
            label="Generated Images",
            show_download_button=True,
            elem_classes="gallery",
            interactive=False,
            show_share_button=False,
            container=True,
            format="png",
            preview=True,
            object_fit="cover",
            columns=2,
            rows=2
        )

    # Event handlers
    model_choice.change(update_imgbox, model_choice, output)
    model_choice.change(extend_choices, model_choice, current_models)
    random_button.click(random_choices, None, model_choice)

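    # For each model slot, wire the generate button / prompt submit to gen_fn and push
    # every new image into the shared gallery.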
    for m, o in zip(current_models, output):
        gen_event = gr.on(
            triggers=[gen_button.click, txt_input.submit],
            fn=gen_fn,
            inputs=[m, txt_input, neg_input, height, width, steps, cfg, seed],
            outputs=[o],
            concurrency_limit=None,
            queue=False
        )
        o.change(add_gallery, [o, m, gallery], [gallery])

demo.launch(show_api=False, max_threads=400)