Ahsen Khaliq committed
Commit f829d0d · Parent(s): f619e7d
Update app.py
app.py CHANGED
@@ -114,14 +114,14 @@ normalize = transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],
                                  std=[0.26862954, 0.26130258, 0.27577711])
 lpips_model = lpips.LPIPS(net='vgg').to(device)
 
-def inference(text, init_image, skip_timesteps, clip_guidance_scale):
+def inference(text, init_image, skip_timesteps, clip_guidance_scale, tv_scale, range_scale, init_scale, seed):
     all_frames = []
     prompts = [text]
     image_prompts = []
     batch_size = 1
     clip_guidance_scale = clip_guidance_scale # Controls how much the image should look like the prompt.
-    tv_scale =
-    range_scale =
+    tv_scale = tv_scale # Controls the smoothness of the final output.
+    range_scale = range_scale # Controls how far out of range RGB values are allowed to be.
     cutn = 16
     n_batches = 1
     if init_image:
@@ -130,8 +130,8 @@ def inference(text, init_image, skip_timesteps, clip_guidance_scale):
     init_image = None # This can be an URL or Colab local path and must be in quotes.
     skip_timesteps = skip_timesteps # This needs to be between approx. 200 and 500 when using an init image.
     # Higher values make the output look more like the init.
-    init_scale =
-    seed =
+    init_scale = init_scale # This enhances the effect of the init image, a good value is 1000.
+    seed = seed
 
     if seed is not None:
         torch.manual_seed(seed)
@@ -217,6 +217,6 @@ def inference(text, init_image, skip_timesteps, clip_guidance_scale):
 title = "CLIP Guided Diffusion HQ"
 description = "Gradio demo for CLIP Guided Diffusion. To use it, simply add your text, or click one of the examples to load them. Read more at the links below."
 article = "<p style='text-align: center'> By Katherine Crowson (https://github.com/crowsonkb, https://twitter.com/RiversHaveWings). It uses OpenAI's 256x256 unconditional ImageNet diffusion model (https://github.com/openai/guided-diffusion) together with CLIP (https://github.com/openai/CLIP) to connect text prompts with images. | <a href='https://colab.research.google.com/drive/12a_Wrfi2_gwwAuN3VvMTwVMz9TfqctNj' target='_blank'>Colab</a></p>"
-iface = gr.Interface(inference, inputs=["text",gr.inputs.Image(type="file", label='initial image (optional)', optional=True),gr.inputs.Slider(minimum=0, maximum=45, step=1, default=0, label="skip_timesteps"), gr.inputs.Slider(minimum=0, maximum=1000, step=1, default=700, label="clip guidance scale (Controls how much the image should look like the prompt.)")], outputs=["image","video"], title=title, description=description, article=article, examples=[["coral reef city by artistation artists"]],
+iface = gr.Interface(inference, inputs=["text",gr.inputs.Image(type="file", label='initial image (optional)', optional=True),gr.inputs.Slider(minimum=0, maximum=45, step=1, default=0, label="skip_timesteps"), gr.inputs.Slider(minimum=0, maximum=1000, step=1, default=700, label="clip guidance scale (Controls how much the image should look like the prompt)"), gr.inputs.Slider(minimum=0, maximum=1000, step=1, default=150, label="tv_scale (Controls the smoothness of the final output)"), gr.inputs.Slider(minimum=0, maximum=1000, step=1, default=50, label="range_scale (Controls how far out of range RGB values are allowed to be)"), gr.inputs.Slider(minimum=0, maximum=1000, step=1, default=0, label="init_scale (This enhances the effect of the init image, a good value is 1000)"), gr.inputs.Number(default=0, label="Seed") ], outputs=["image","video"], title=title, description=description, article=article, examples=[["coral reef city by artistation artists", None, 0, 1000, 150, 50, 0, 0]],
                      enable_queue=True)
 iface.launch()
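For context, the gr.inputs.* components and the enable_queue=True argument used in this commit come from the legacy Gradio 2.x API. Below is a minimal sketch of the same wiring against the newer Gradio component API (gr.Textbox, gr.Slider, gr.Number, and queue() instead of enable_queue). This is an assumption about a newer Gradio version, not part of the commit; the stubbed inference() is only a placeholder so the snippet runs standalone, while the real function, description, and article are the ones defined in app.py above.

import gradio as gr

def inference(text, init_image, skip_timesteps, clip_guidance_scale,
              tv_scale, range_scale, init_scale, seed):
    # Placeholder for the real CLIP-guided diffusion function in app.py,
    # which returns (final image, progress video).
    return None, None

title = "CLIP Guided Diffusion HQ"
description = "Gradio demo for CLIP Guided Diffusion."  # trimmed

iface = gr.Interface(
    fn=inference,
    inputs=[
        gr.Textbox(label="text"),
        gr.Image(type="filepath", label="initial image (optional)"),
        gr.Slider(0, 45, step=1, value=0, label="skip_timesteps"),
        gr.Slider(0, 1000, step=1, value=700, label="clip guidance scale (Controls how much the image should look like the prompt)"),
        gr.Slider(0, 1000, step=1, value=150, label="tv_scale (Controls the smoothness of the final output)"),
        gr.Slider(0, 1000, step=1, value=50, label="range_scale (Controls how far out of range RGB values are allowed to be)"),
        gr.Slider(0, 1000, step=1, value=0, label="init_scale (Enhances the effect of the init image, a good value is 1000)"),
        gr.Number(value=0, label="Seed"),
    ],
    outputs=[gr.Image(label="image"), gr.Video(label="video")],
    title=title,
    description=description,
    examples=[["coral reef city by artistation artists", None, 0, 1000, 150, 50, 0, 0]],
)

# queue() replaces the removed enable_queue=True flag in newer Gradio releases.
iface.queue().launch()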