Perp-Neg / app.py
import os

import gradio as gr
import torch
from PIL import Image
from torch import autocast

from perpneg_diffusion.perpneg_stable_diffusion.pipeline_perpneg_stable_diffusion import PerpStableDiffusionPipeline
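# PerpStableDiffusionPipeline is the repository's custom Perp-Neg pipeline built on
# top of Stable Diffusion; it is called below with "|"-separated prompts and
# per-prompt weights.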
has_cuda = torch.cuda.is_available()
device = torch.device('cuda' if has_cuda else 'cpu')
print(f"Using device: {device}")
# Initialize the Perp-Neg Stable Diffusion pipeline
pipe = PerpStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4",
# use_auth_token=True
).to(device)
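# Disable the built-in safety checker by replacing it with a no-op that returns
# the images unchanged and reports that nothing was flagged.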
def dummy(images, **kwargs):
return images, False
pipe.safety_checker = dummy
examples = [
[
"an armchair in the shape of an avocado | cushion in the armchair",
"1 | -0.3",
"145",
"7.5"
],
[
"an armchair in the shape of an avocado",
"1",
"145",
"7.5"
],
[
"a peacock, back view | a peacock, front view",
"1 | -3.5",
"30",
"7.5"
],
[
"a peacock, back view",
"1",
"30",
"7.5"
],
[
"A boy wearing sunglasses | a pair of sunglasses with white frame",
"1 | -0.35",
"200",
"11"
],
[
"A boy wearing sunglasses",
"1",
"200",
"11",
],
[
"a photo of an astronaut riding a horse | a jumping horse | a white horse",
"1 | -0.3 | -0.1",
"1988",
"10"
],
[
"a photo of an astronaut riding a horse | a jumping horse",
"1 | -0.3",
"1988",
"10"
],
[
"a photo of an astronaut riding a horse",
"1",
"1988",
"10"
],
]
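# Each example above is [prompts, weights, seed, guidance scale]: prompts and weights
# are "|"-separated, with a positive weight for the main prompt and negative weights
# for the prompts to steer away from.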
def predict(prompt, weights, seed, scale=7.5, steps=50):
    try:
        with torch.no_grad():
            has_cuda = torch.cuda.is_available()
            with autocast('cuda' if has_cuda else 'cpu'):
                if has_cuda:
                    generator = torch.Generator('cuda').manual_seed(int(seed))
                else:
                    generator = torch.Generator().manual_seed(int(seed))
                image_perpneg = pipe(
                    prompt,
                    guidance_scale=float(scale),
                    generator=generator,
                    num_inference_steps=steps,
                    weights=weights,
                )["images"][0]
        return image_perpneg
    except Exception as e:
        print(e)
        return None
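# A minimal sketch of calling predict directly (bypassing the Gradio UI), reusing one
# of the example inputs above; the output filename is only illustrative:
#
#   image = predict("a peacock, back view | a peacock, front view", "1 | -3.5", 30, 7.5)
#   if image is not None:
#       image.save("peacock_back_view.png")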
app = gr.Blocks()
with app:
gr.Markdown(
"# **<p align='center'>Perp-Neg: Iterative Editing and Robust View Generation Using Stable Diffusion</p>**"
)
gr.Markdown(
"""
### **<p align='center'>Demo created by Huangjie Zheng and Reza Armandpour</p>**
"""
)
with gr.Row():
with gr.Column():
            prompt = gr.Textbox(
                label="Prompts (a list of prompts separated by vertical bar | ):",
                show_label=True,
                placeholder="a peacock, back view | a peacock, front view",
            )
weights = gr.Textbox(
label="Weights (a list of weights separated by vertical bar | )", show_label=True, placeholder="1 | -3.5"
)
seed = gr.Textbox(
label="Seed", show_label=True, value=30
)
scale = gr.Textbox(
label="Guidance scale", show_label=True, value=7.5
)
image_gen_btn = gr.Button(value="Generate")
with gr.Column():
img_output = gr.Image(
label="Result",
show_label=True,
)
gr.Markdown("**Examples:**")
gr.Examples(
examples,
[prompt, weights, seed, scale],
[img_output],
fn=predict,
cache_examples=False,
)
image_gen_btn.click(
predict,
inputs=[prompt, weights, seed, scale],
outputs=[img_output],
)
    gr.Markdown("""
    \n The algorithm is based on the paper: [Re-imagine the Negative Prompt Algorithm: Transform 2D Diffusion into 3D, alleviate Janus problem and Beyond](https://Perp-Neg.github.io).
    """)
    gr.Markdown(
        """
        \n Demo created by: Huangjie Zheng and Reza Armandpour.
        """
    )
app.launch()