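# app.py: "Dream Comes True 2.0" (幻梦成真-2.0), a Gradio Space.
# Flow: expand a short English prompt with a MagicPrompt Space, then render two
# Dreamlike Photoreal 2.0 images whose difference is controlled by a noise slider.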
import gradio as gr
import os
import sys
from pathlib import Path
import random
import string
import time
from queue import Queue
from threading import Thread
import emoji
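
# Prompt expander: a hosted MagicPrompt-Stable-Diffusion Space, callable like a function.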
text_gen=gr.Interface.load("spaces/Dao3/MagicPrompt-Stable-Diffusion")
def get_prompts(prompt_text):
    if prompt_text:
        return text_gen("photo, " + prompt_text)
    else:
        return text_gen("")
proc1=gr.Interface.load("models/dreamlike-art/dreamlike-photoreal-2.0")
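
# Re-exec this script at a random interval of 540-600 seconds; it runs in a
# daemon thread (started below) so it never blocks shutdown.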
def restart_script_periodically():
    while True:
        random_time = random.randint(540, 600)
        time.sleep(random_time)
        os.execl(sys.executable, sys.executable, *sys.argv)
restart_thread = Thread(target=restart_script_periodically, daemon=True)
restart_thread.start()
queue = Queue()
queue_threshold = 100
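
# add_random_noise: replace a small fraction of the prompt's characters with random
# letters, punctuation, digits, or emoji so that repeated runs of the same prompt
# produce different images; noise_level (0-3, set by the slider) scales that fraction.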
def add_random_noise(prompt, noise_level=0.00):
    if noise_level == 0:
        noise_level = 0.00
    percentage_noise = noise_level * 5
    num_noise_chars = int(len(prompt) * (percentage_noise / 100))
    noise_indices = random.sample(range(len(prompt)), num_noise_chars)
    prompt_list = list(prompt)
    noise_chars = list(string.ascii_letters + string.punctuation + ' ' + string.digits)
    noise_chars.extend(['😍', '💩', '😂', '🤔', '😊', '🤗', '😭', '🙄', '😷', '🤯', '🤫', '🥴', '😴', '🤩', '🥳', '😔', '😩', '🤪', '😇', '🤢', '😈', '👹', '👻', '🤖', '👽', '💀', '🎃', '🎅', '🎄', '🎁', '🎂', '🎉', '🎈', '🎊', '🎮', '❤️', '💔', '💕', '💖', '💗', '🐶', '🐱', '🐭', '🐹', '🦊', '🐻', '🐨', '🐯', '🦁', '🐘', '🔥', '🌧️', '🌞', '🌈', '💥', '🌴', '🌊', '🌺', '🌻', '🌸', '🎨', '🌅', '🌌', '☁️', '⛈️', '❄️', '☀️', '🌤️', '⛅️', '🌥️', '🌦️', '🌧️', '🌩️', '🌨️', '🌫️', '☔️', '🌬️', '💨', '🌪️', '🌈'])
    for index in noise_indices:
        prompt_list[index] = random.choice(noise_chars)
    return "".join(prompt_list)
def send_it1(inputs, noise_level, proc1=proc1):
    prompt_with_noise = add_random_noise(inputs, noise_level)
    while queue.qsize() >= queue_threshold:
        time.sleep(2)
    queue.put(prompt_with_noise)
    output1 = proc1(prompt_with_noise)
    return output1
def send_it2(inputs, noise_level, proc1=proc1):
    prompt_with_noise = add_random_noise(inputs, noise_level)
    while queue.qsize() >= queue_threshold:
        time.sleep(2)
    queue.put(prompt_with_noise)
    output2 = proc1(prompt_with_noise)
    return output2
#def send_it3(inputs, noise_level, proc1=proc1):
#    prompt_with_noise = add_random_noise(inputs, noise_level)
#    while queue.qsize() >= queue_threshold:
#        time.sleep(2)
#    queue.put(prompt_with_noise)
#    output3 = proc1(prompt_with_noise)
#    return output3
#def send_it4(inputs, noise_level, proc1=proc1):
#    prompt_with_noise = add_random_noise(inputs, noise_level)
#    while queue.qsize() >= queue_threshold:
#        time.sleep(2)
#    queue.put(prompt_with_noise)
#    output4 = proc1(prompt_with_noise)
#    return output4
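
# UI: a short-prompt box with a "Reveal the spell" button that expands it via
# get_prompts, a full-prompt box, a difference (noise) slider, and two output
# images generated from the same (noised) prompt.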
with gr.Blocks(css='style.css') as demo:
    gr.HTML(
        """
        <div style="text-align: center; max-width: 650px; margin: 0 auto;">
          <div>
            <h2 style="font-weight: 900; font-size: 3rem; margin-bottom:20px;">
              Dream Comes True 2.0
            </h2>
          </div>
          <p style="margin-bottom: 10px; font-size: 96%">
            Difference level: the slider controls how different the two images are. The higher the value, the more the two images differ; the lower, the more alike they are.
          </p>
          <p style="margin-bottom: 10px; font-size: 98%">
            ❤️ If you like this Space, click the ❤️ at the top~ ❤️
          </p>
        </div>
        """
    )
    with gr.Column(elem_id="col-container"):
        with gr.Row(variant="compact"):
            input_text = gr.Textbox(
                label="Short Prompt",
                show_label=False,
                max_lines=2,
                placeholder="Type what you imagine (English words), then press the button on the right. No inspiration? Just press it anyway!",
            ).style(
                container=False,
            )
            see_prompts = gr.Button("✨ Reveal the spell ✨").style(full_width=False)
        with gr.Row(variant="compact"):
            prompt = gr.Textbox(
                label="Prompt",
                show_label=False,
                max_lines=2,
                placeholder="Enter a full prompt here, or generate one with the 'Reveal the spell' button",
            ).style(
                container=False,
            )
            run = gr.Button("✨ Dream Comes True ✨").style(full_width=False)
        with gr.Row():
            with gr.Row():
                noise_level = gr.Slider(minimum=0.0, maximum=3, step=0.1, label="Difference level")
        with gr.Row():
            with gr.Row():
                output1 = gr.Image(label="Dreamlike-photoreal-2.0", show_label=False)
                output2 = gr.Image(label="Dreamlike-photoreal-2.0", show_label=False)
        #with gr.Row():
        #    output1 = gr.Image()

        see_prompts.click(get_prompts, inputs=[input_text], outputs=[prompt], queue=False)
        run.click(send_it1, inputs=[prompt, noise_level], outputs=[output1])
        run.click(send_it2, inputs=[prompt, noise_level], outputs=[output2])
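
    # NOTE: the txt_to_img / img_to_img / replace_nsfw_images helpers below appear to be
    # leftover, unused code from an upstream multi-model app. Nothing in this file calls
    # them, and they reference modules (datetime, torch, PIL, diffusers) and globals
    # (pipe, current_model, current_model_path, last_mode, is_colab, custom_model,
    # update_state, pipe_callback) that are not defined here.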
    def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width, height, generator, seed):
        print(f"{datetime.datetime.now()} txt_to_img, model: {current_model.name}")
        global last_mode
        global pipe
        global current_model_path
        if model_path != current_model_path or last_mode != "txt2img":
            current_model_path = model_path
            update_state(f"Loading {current_model.name} text-to-image model...")
            if is_colab or current_model == custom_model:
                pipe = StableDiffusionPipeline.from_pretrained(
                    current_model_path,
                    torch_dtype=torch.float16,
                    scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
                    safety_checker=lambda images, clip_input: (images, False)
                )
            else:
                pipe = StableDiffusionPipeline.from_pretrained(
                    current_model_path,
                    torch_dtype=torch.float16,
                    scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
                )
            # pipe = pipe.to("cpu")
            # pipe = current_model.pipe_t2i
            if torch.cuda.is_available():
                pipe = pipe.to("cuda")
                pipe.enable_xformers_memory_efficient_attention()
            last_mode = "txt2img"

        prompt = current_model.prefix + prompt
        result = pipe(
            prompt,
            negative_prompt=neg_prompt,
            num_images_per_prompt=n_images,
            num_inference_steps=int(steps),
            guidance_scale=guidance,
            width=width,
            height=height,
            generator=generator,
            callback=pipe_callback)
        # update_state(f"Done. Seed: {seed}")

        return replace_nsfw_images(result)
    def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator, seed):
        print(f"{datetime.datetime.now()} img_to_img, model: {model_path}")
        global last_mode
        global pipe
        global current_model_path
        if model_path != current_model_path or last_mode != "img2img":
            current_model_path = model_path
            update_state(f"Loading {current_model.name} image-to-image model...")
            if is_colab or current_model == custom_model:
                pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
                    current_model_path,
                    torch_dtype=torch.float16,
                    scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
                    safety_checker=lambda images, clip_input: (images, False)
                )
            else:
                pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
                    current_model_path,
                    torch_dtype=torch.float16,
                    scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
                )
            # pipe = pipe.to("cpu")
            # pipe = current_model.pipe_i2i
            if torch.cuda.is_available():
                pipe = pipe.to("cuda")
                pipe.enable_xformers_memory_efficient_attention()
            last_mode = "img2img"

        prompt = current_model.prefix + prompt
        ratio = min(height / img.height, width / img.width)
        img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
        result = pipe(
            prompt,
            negative_prompt=neg_prompt,
            num_images_per_prompt=n_images,
            image=img,
            num_inference_steps=int(steps),
            strength=strength,
            guidance_scale=guidance,
            # width=width,
            # height=height,
            generator=generator,
            callback=pipe_callback)
        # update_state(f"Done. Seed: {seed}")

        return replace_nsfw_images(result)
    def replace_nsfw_images(results):
        if is_colab:
            return results.images
        for i in range(len(results.images)):
            if results.nsfw_content_detected[i]:
                results.images[i] = Image.open("nsfw.png")
        return results.images
    with gr.Row():
        gr.HTML(
            """
            <div class="footer">
                <p>
                    Built on the <a href="https://huggingface.co/dreamlike-art/dreamlike-photoreal-2.0">Dreamlike Photoreal 2.0</a> Stable Diffusion model. The original author of this demo is <a href="https://twitter.com/DavidJohnstonxx/">Phenomenon1981</a>.
                </p>
            </div>
            <div class="acknowledgments" style="font-size: 115%">
                <p>
                    How this differs from <a href="https://huggingface.co/spaces/Dao3/DreamlikeArt-Diffusion-1.0">Dream Comes True 1.0</a>: that model leans dreamlike and surreal, while this one is more photorealistic; after all, the dream has now "come true".
                </p>
            </div>
            <div class="acknowledgments" style="font-size: 115%">
                <p>
                    A plug: there is also a Chinese-language project, <a href="https://tiwenti.chat/">TiwenTi.chat</a>, a Chinese library of ChatGPT prompt examples organized by tool use and role-play use. Feel free to browse and share!
                </p>
            </div>
            """
        )
demo.queue(concurrency_count=100)
demo.launch(inline=True)