Ahsen Khaliq committed · Commit d1adddc · 1 Parent(s): 5e1afcb
app.py CHANGED
@@ -1,8 +1,6 @@
 import torch
 torch.hub.download_url_to_file('http://mirror.io.community/blob/vqgan/vqgan_imagenet_f16_16384.yaml', 'vqgan_imagenet_f16_16384.yaml')
 torch.hub.download_url_to_file('http://mirror.io.community/blob/vqgan/vqgan_imagenet_f16_16384.ckpt', 'vqgan_imagenet_f16_16384.ckpt')
-torch.hub.download_url_to_file('http://batbot.tv/misc/coco_first_stage.yaml', 'coco_first_stage.yaml')
-torch.hub.download_url_to_file('http://batbot.tv/misc/coco_first_stage.ckpt', 'coco_first_stage.ckpt')
 import argparse
 import math
 from pathlib import Path
@@ -170,40 +168,39 @@ def resize_image(image, out_size):
     area = min(image.size[0] * image.size[1], out_size[0] * out_size[1])
     size = round((area * ratio)**0.5), round((area / ratio)**0.5)
     return image.resize(size, Image.LANCZOS)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+model_name = "vqgan_imagenet_f16_16384"
+images_interval = 50
+width = 280
+height = 280
+init_image = ""
+seed = 42
+args = argparse.Namespace(
+    noise_prompt_seeds=[],
+    noise_prompt_weights=[],
+    size=[width, height],
+    init_image=init_image,
+    init_weight=0.,
+    clip_model='ViT-B/32',
+    vqgan_config=f'{model_name}.yaml',
+    vqgan_checkpoint=f'{model_name}.ckpt',
+    step_size=0.15,
+    cutn=4,
+    cut_pow=1.,
+    display_freq=images_interval,
+    seed=seed,
+)
+device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+print('Using device:', device)
+model = load_vqgan_model(args.vqgan_config, args.vqgan_checkpoint).to(device)
+perceptor = clip.load(args.clip_model, jit=False)[0].eval().requires_grad_(False).to(device)
+def inference(text, seed, step_size, max_iterations, width, height):
+    size=[width, height]
     texts = text
     target_images = ""
     max_iterations = max_iterations
-    model_name = model_name
     model_names={"vqgan_imagenet_f16_16384": 'ImageNet 16384',"vqgan_imagenet_f16_1024":"ImageNet 1024", 'vqgan_openimages_f16_8192':'OpenImages 8912',
-    "wikiart_1024":"WikiArt 1024", "wikiart_16384":"WikiArt 16384", "
+    "wikiart_1024":"WikiArt 1024", "wikiart_16384":"WikiArt 16384", "coco":"COCO-Stuff", "faceshq":"FacesHQ", "sflckr":"S-FLCKR"}
     name_model = model_names[model_name]
-    init_image = ""
-    size=[width, height]
-    seed=seed
-    step_size=step_size
-
-    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
-    print('Using device:', device)
-    model = load_vqgan_model(args.vqgan_config, args.vqgan_checkpoint).to(device)
-    perceptor = clip.load(args.clip_model, jit=False)[0].eval().requires_grad_(False).to(device)
     if target_images == "None" or not target_images:
         target_images = []
     else:
@@ -345,7 +342,7 @@ def load_image( infilename ) :
     img.load()
     data = np.asarray( img, dtype="int32" )
     return data
-def throttled_inference(text, seed, step_size, max_iterations, width, height
+def throttled_inference(text, seed, step_size, max_iterations, width, height):
     global inferences_running
     current = inferences_running
     if current >= 3:
@@ -354,7 +351,7 @@ def throttled_inference(text, seed, step_size, max_iterations, width, height, mo
     print(f"Inference starting when we already had {current} running")
     inferences_running += 1
     try:
-        return inference(text, seed, step_size, max_iterations, width, height
+        return inference(text, seed, step_size, max_iterations, width, height)
     finally:
         print("Inference finished")
         inferences_running -= 1
@@ -369,15 +366,14 @@ gr.Interface(
     gr.inputs.Slider(minimum=25, maximum=150, default=80, label='max iterations', step=1),
     gr.inputs.Slider(minimum=200, maximum=280, default=256, label='width', step=1),
     gr.inputs.Slider(minimum=200, maximum=280, default=256, label='height', step=1),
-    gr.inputs.Dropdown(choices=["vqgan_imagenet_f16_16384", "coco_first_stage"], type="value", default="vqgan_imagenet_f16_16384", label="Model Name")
     ],
     gr.outputs.Image(type="numpy", label="Output"),
     title=title,
     description=description,
     article=article,
     examples=[
-    ['a garden by james gurney',42,0.16, 100, 256, 256
-    ['coral reef city artstationHQ',1000,0.6, 110, 200, 200
-    ['a cabin in the mountains unreal engine',98,0.3, 120, 280, 280
+    ['a garden by james gurney',42,0.16, 100, 256, 256],
+    ['coral reef city artstationHQ',1000,0.6, 110, 200, 200],
+    ['a cabin in the mountains unreal engine',98,0.3, 120, 280, 280]
     ]
 ).launch(debug=True)
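The largest hunk above moves model construction out of the request path: load_vqgan_model(...) and clip.load(...) now run once at module level, so the checkpoints are loaded at startup rather than inside inference() on every call. A minimal sketch of that load-once, reuse-per-request pattern, with an illustrative build_model()/handle_request() standing in for the Space's real loaders:

    import torch

    def build_model():
        # stand-in for load_vqgan_model(...) / clip.load(...); not the Space's code
        return torch.nn.Linear(4, 4).eval()

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model = build_model().to(device)  # built once, at import time

    def handle_request(x):
        # the per-request handler only reads the module-level model
        with torch.no_grad():
            return model(torch.as_tensor(x, dtype=torch.float32, device=device))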
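throttled_inference() keeps its concurrency guard: a module-level counter tracks in-flight inferences, new work proceeds only while fewer than three are running, and the try/finally ensures the counter is decremented even if inference() raises. A self-contained sketch of that counter-based throttle (run_model() and the busy message are illustrative; the diff does not show what the Space returns when the cap is hit):

    inferences_running = 0

    def run_model(prompt):
        # placeholder for the real VQGAN+CLIP loop
        return f"image for: {prompt}"

    def throttled(prompt):
        global inferences_running
        if inferences_running >= 3:
            return "Too many inferences running, please retry in a minute"
        inferences_running += 1
        try:
            return run_model(prompt)
        finally:
            inferences_running -= 1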