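"""Gradio demo for Perp-Neg: iterative editing and robust view generation with Stable Diffusion.

The app takes a '|'-separated list of prompts, a matching '|'-separated list of weights
(negative weights steer the image away from that prompt, as in the examples below),
a random seed, and a guidance scale, and returns a single generated image.
"""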
import torch
import gradio as gr
import os
from PIL import Image
from torch import autocast

from perpneg_diffusion.perpneg_stable_diffusion.pipeline_perpneg_stable_diffusion import PerpStableDiffusionPipeline

# Run on GPU when available, otherwise fall back to CPU.
has_cuda = torch.cuda.is_available()
device = torch.device('cpu' if not has_cuda else 'cuda')
print(device)
# Initialize the Perp-Neg Stable Diffusion pipeline.
pipe = PerpStableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    # use_auth_token=True
).to(device)


# Replace the safety checker with a no-op so generated images are returned unfiltered.
def dummy(images, **kwargs):
    return images, False


pipe.safety_checker = dummy
# Each example is [prompts, weights, seed, guidance scale].
examples = [
    [
        "an armchair in the shape of an avocado | cushion in the armchair",
        "1 | -0.3",
        "145",
        "7.5",
    ],
    [
        "an armchair in the shape of an avocado",
        "1",
        "145",
        "7.5",
    ],
    [
        "a peacock, back view | a peacock, front view",
        "1 | -3.5",
        "30",
        "7.5",
    ],
    [
        "a peacock, back view",
        "1",
        "30",
        "7.5",
    ],
    [
        "A boy wearing sunglasses | a pair of sunglasses with white frame",
        "1 | -0.35",
        "200",
        "11",
    ],
    [
        "A boy wearing sunglasses",
        "1",
        "200",
        "11",
    ],
    [
        "a photo of an astronaut riding a horse | a jumping horse | a white horse",
        "1 | -0.3 | -0.1",
        "1988",
        "10",
    ],
    [
        "a photo of an astronaut riding a horse | a jumping horse",
        "1 | -0.3",
        "1988",
        "10",
    ],
    [
        "a photo of an astronaut riding a horse",
        "1",
        "1988",
        "10",
    ],
]
def predict(prompt, weights, seed, scale=7.5, steps=50):
    """Generate one image from '|'-separated prompts and weights using the Perp-Neg pipeline."""
    try:
        with torch.no_grad():
            has_cuda = torch.cuda.is_available()
            with autocast('cpu' if not has_cuda else 'cuda'):
                if has_cuda:
                    generator = torch.Generator('cuda').manual_seed(int(seed))
                else:
                    generator = torch.Generator().manual_seed(int(seed))
                image_perpneg = pipe(
                    prompt,
                    guidance_scale=float(scale),
                    generator=generator,
                    num_inference_steps=steps,
                    weights=weights,
                )["images"][0]
                return image_perpneg
    except Exception as e:
        print(e)
        return None
app = gr.Blocks()

with app:
    gr.Markdown(
        "# **<p align='center'>Perp-Neg: Iterative Editing and Robust View Generation Using Stable Diffusion</p>**"
    )
    gr.Markdown(
        """
        ### **<p align='center'>Demo created by Huangjie Zheng and Reza Armandpour</p>**
        """
    )

    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(
                label="Prompts (a list of prompts separated by vertical bar | ):",
                show_label=True,
                placeholder="a peacock, back view | a peacock, front view",
            )
            weights = gr.Textbox(
                label="Weights (a list of weights separated by vertical bar | )",
                show_label=True,
                placeholder="1 | -3.5",
            )
            seed = gr.Textbox(label="Seed", show_label=True, value="30")
            scale = gr.Textbox(label="Guidance scale", show_label=True, value="7.5")
            image_gen_btn = gr.Button(value="Generate")
        with gr.Column():
            img_output = gr.Image(label="Result", show_label=True)

    gr.Markdown("**Examples:**")
    gr.Examples(
        examples,
        [prompt, weights, seed, scale],
        [img_output],
        fn=predict,
        cache_examples=False,
    )

    image_gen_btn.click(
        predict,
        inputs=[prompt, weights, seed, scale],
        outputs=[img_output],
    )

    gr.Markdown(
        """
        \n The algorithm is based on the paper: [Re-imagine the Negative Prompt Algorithm: Transform 2D Diffusion into 3D, alleviate Janus problem and Beyond](https://Perp-Neg.github.io).
        """
    )
    gr.Markdown(
        """
        \n Demo created by: Huangjie Zheng and Reza Armandpour.
        """
    )

app.launch()