import gradio as gr
import random
import re
import numpy as np
import torch
from PIL import Image
from diffusers import StableDiffusionPipeline
from transformers import pipeline, set_seed
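
# GPT-2 model fine-tuned on Stable Diffusion prompts ("MagicPrompt"); used to
# expand a short starting text into full prompt suggestions.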
gpt2_pipe = pipeline('text-generation', model='Gustavosta/MagicPrompt-Stable-Diffusion', tokenizer='gpt2')
def generate(starting_text):
    seed = random.randint(100, 1000000)
    set_seed(seed)

    # If no starting text is given, fall back to a random starter line
    # (assumes an "ideas.txt" file with one starter prompt per line next to app.py).
    if starting_text == "":
        with open("ideas.txt", "r") as f:
            line = f.readlines()
        starting_text: str = line[random.randrange(0, len(line))].replace("\n", "").lower().capitalize()
        starting_text: str = re.sub(r"[,:\-–.!;?_]", '', starting_text)

    # Ask GPT-2 for four prompt continuations of the starting text.
    response = gpt2_pipe(starting_text, max_length=(len(starting_text) + random.randint(60, 90)), num_return_sequences=4)
    response_list = []
    for x in response:
        resp = x['generated_text'].strip()
        # Keep only completions that actually extend the input and do not end mid-phrase.
        if resp != starting_text and len(resp) > (len(starting_text) + 4) and not resp.endswith((":", "-", "\u2014")):
            response_list.append(resp + '\n')

    response_end = "\n".join(response_list)
    response_end = re.sub(r'[^ ]+\.[^ ]+', '', response_end)  # drop tokens that look like file names or URLs
    response_end = response_end.replace("<", "").replace(">", "")

    if response_end != "":
        return response_end
txt = gr.Textbox(lines=1, label="Initial Text", placeholder="English Text here")
out = gr.Textbox(lines=4, label="Generated Prompts")
title = "Stable Diffusion Prompt Generator"
# Launch the prompt-generator UI. launch() blocks the main thread when run as a
# script, so the Stable Diffusion demo below only runs after this server is stopped.
gr.Interface(fn=generate,
             inputs=txt,
             outputs=out,
             title=title,
             article='',
             allow_flagging='never',
             cache_examples=False,
             theme="default").launch(enable_queue=True, debug=True)
model_id = "runwayml/stable-diffusion-v1-5"
pipe = StableDiffusionPipeline.from_pretrained(model_id).to('cpu')
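
# Generate a 512x512 image from the prompt using the seed, step count, and
# guidance scale chosen in the UI.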
def infer(prompt, negative, steps, scale, seed):
    generator = torch.Generator(device='cpu').manual_seed(seed)
    img = pipe(
        prompt,
        height=512,
        width=512,
        num_inference_steps=steps,
        guidance_scale=scale,
        negative_prompt=negative,
        generator=generator,
    ).images
    return img
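
# Blocks-based UI: prompt and negative-prompt inputs, a generate button,
# a gallery for the output image, and sliders for the advanced options.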
block = gr.Blocks()

with block:
    with gr.Group():
        with gr.Box():
            with gr.Row(elem_id="prompt-container").style(mobile_collapse=False, equal_height=True):
                with gr.Column():
                    text = gr.Textbox(
                        label="Enter your prompt",
                        show_label=False,
                        max_lines=1,
                        placeholder="Enter your prompt",
                    ).style(
                        border=(True, False, True, True),
                        rounded=(True, False, False, True),
                        container=False,
                    )
                    negative = gr.Textbox(
                        label="Enter your negative prompt",
                        show_label=False,
                        placeholder="Enter a negative prompt",
                        elem_id="negative-prompt-text-input",
                    ).style(
                        border=(True, False, True, True),
                        rounded=(True, False, False, True),
                        container=False,
                    )
                btn = gr.Button("Generate image").style(
                    margin=False,
                    rounded=(False, True, True, False),
                )

        gallery = gr.Gallery(
            label="Generated images", show_label=False, elem_id="gallery"
        ).style(columns=(1, 2), height="auto")

        with gr.Row(elem_id="advanced-options"):
            samples = gr.Slider(label="Images", minimum=1, maximum=1, value=1, step=1, interactive=False)
            steps = gr.Slider(label="Steps", minimum=1, maximum=50, value=12, step=1, interactive=True)
            scale = gr.Slider(label="Guidance Scale", minimum=0, maximum=50, value=7.5, step=0.1, interactive=True)
            seed = gr.Slider(label="Random seed", minimum=0, maximum=2147483647, step=1, randomize=True, interactive=True)

    btn.click(infer, inputs=[text, negative, steps, scale, seed], outputs=[gallery])

block.launch(show_api=False)