Spaces:
Runtime error
Ahsen Khaliq committed on
Commit · 2397336
1 Parent(s): 9a34aef
Update app.py

app.py CHANGED
@@ -1,9 +1,6 @@
 import torch
 torch.hub.download_url_to_file('http://mirror.io.community/blob/vqgan/vqgan_imagenet_f16_16384.yaml', 'vqgan_imagenet_f16_16384.yaml')
 torch.hub.download_url_to_file('http://mirror.io.community/blob/vqgan/vqgan_imagenet_f16_16384.ckpt', 'vqgan_imagenet_f16_16384.ckpt')
-# import torch
-# torch.hub.download_url_to_file('http://mirror.io.community/blob/vqgan/vqgan_imagenet_f16_16384.yaml', 'vqgan_imagenet_f16_16384.yaml')
-# torch.hub.download_url_to_file('http://mirror.io.community/blob/vqgan/vqgan_imagenet_f16_16384.ckpt', 'vqgan_imagenet_f16_16384.ckpt')
 import argparse
 import math
 from pathlib import Path
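This first hunk only deletes commented-out duplicates of the weight-download lines. One thing the commit leaves as-is: torch.hub.download_url_to_file re-fetches both files on every restart of the Space. A guarded variant is a common refinement; the sketch below is hypothetical (fetch_once is not in the commit), assuming the same URLs and filenames:

from pathlib import Path
import torch

def fetch_once(url, dest):
    # Hypothetical helper (not in this commit): skip the download when the
    # file already exists, so restarts don't re-fetch the large checkpoint.
    if not Path(dest).exists():
        torch.hub.download_url_to_file(url, dest)

fetch_once('http://mirror.io.community/blob/vqgan/vqgan_imagenet_f16_16384.yaml', 'vqgan_imagenet_f16_16384.yaml')
fetch_once('http://mirror.io.community/blob/vqgan/vqgan_imagenet_f16_16384.ckpt', 'vqgan_imagenet_f16_16384.ckpt')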
@@ -27,6 +24,9 @@ import imageio
 from PIL import ImageFile, Image
 ImageFile.LOAD_TRUNCATED_IMAGES = True
 import gradio as gr
+
+torch.hub.download_url_to_file('https://i.imgur.com/WEHmKef.jpg', 'gpu.jpg')
+
 def sinc(x):
     return torch.where(x != 0, torch.sin(math.pi * x) / (math.pi * x), x.new_ones([]))
 def lanczos(x, a):
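The added lines fetch a static placeholder image, gpu.jpg, once at import time. As the final hunk shows, throttled_inference hands this image back when too many generations are already in flight, so the busy path never has to touch the network or the GPU.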
@@ -183,7 +183,7 @@ args = argparse.Namespace(
     clip_model='ViT-B/32',
     vqgan_config=f'{model_name}.yaml',
     vqgan_checkpoint=f'{model_name}.ckpt',
-    step_size=0.
+    step_size=0.15,
     cutn=1,
     cut_pow=1.,
     display_freq=images_interval,
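In scripts of this VQGAN+CLIP family, step_size is conventionally the learning rate of the optimizer that updates the latent image code (the old value is truncated in this view; the new value is 0.15). A minimal, self-contained sketch of that assumed role, with a hypothetical latent shape:

import torch
import torch.optim as optim

z = torch.randn(1, 256, 16, 16, requires_grad=True)  # hypothetical latent shape
opt = optim.Adam([z], lr=0.15)   # step_size from this commit as the Adam lr
loss = z.pow(2).mean()           # stand-in for the real CLIP similarity loss
loss.backward()
opt.step()                       # one update scaled by step_size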
@@ -197,7 +197,7 @@ perceptor = clip.load(args.clip_model, jit=False)[0].eval().requires_grad_(False
 def inference(text):
     texts = text
     target_images = ""
-    max_iterations =
+    max_iterations = 150
     model_names={"vqgan_imagenet_f16_16384": 'ImageNet 16384',"vqgan_imagenet_f16_1024":"ImageNet 1024", 'vqgan_openimages_f16_8192':'OpenImages 8912',
                  "wikiart_1024":"WikiArt 1024", "wikiart_16384":"WikiArt 16384", "coco":"COCO-Stuff", "faceshq":"FacesHQ", "sflckr":"S-FLCKR"}
     name_model = model_names[model_name]
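The old right-hand side of max_iterations is truncated in this view; the commit pins it to 150, which caps how long a single request can hold the GPU on a shared Space. Combined with the larger step_size above, the trade appears to be coarser convergence per request in exchange for bounded runtime.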
@@ -337,12 +337,18 @@ def inference(text):
 
 inferences_running = 0
 
+def load_image( infilename ) :
+    img = Image.open( infilename )
+    img.load()
+    data = np.asarray( img, dtype="int32" )
+    return data
+
 def throttled_inference(text):
     global inferences_running
     current = inferences_running
-    if current >=
+    if current >= 3:
         print(f"Rejected inference when we already had {current} running")
-        return
+        return load_image("./gpu.jpg")
 
     print(f"Inference starting when we already had {current} running")
     inferences_running += 1
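This last hunk is the heart of the commit: a crude admission controller. A bare global counts in-flight generations; at 3 or more, the handler short-circuits and returns the pre-downloaded gpu.jpg (decoded by the new load_image helper into an int32 NumPy array that Gradio can display) instead of starting another run. The decrement of inferences_running sits outside this hunk, so the sketch below assumes it happens in a finally block after inference() returns. Note also that check-then-increment on a plain global can race if the server dispatches handlers from multiple threads; this variant (mine, not the author's) adds a lock:

import threading
import numpy as np
from PIL import Image

MAX_CONCURRENT = 3          # mirrors the commit's limit
_lock = threading.Lock()    # assumption: serializes counter updates
inferences_running = 0

def load_image(infilename):
    # Same idea as the commit's helper: decode to an array Gradio can render.
    img = Image.open(infilename)
    img.load()
    return np.asarray(img, dtype="int32")

def throttled_inference(text):
    global inferences_running
    with _lock:
        if inferences_running >= MAX_CONCURRENT:
            print(f"Rejected inference when we already had {inferences_running} running")
            return load_image("./gpu.jpg")   # placeholder fetched at startup
        inferences_running += 1
    try:
        return inference(text)               # the generator defined earlier in app.py
    finally:
        with _lock:
            inferences_running -= 1          # assumed to live outside the shown hunk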
|