Ahsen Khaliq committed
Commit 2a759eb · Parent(s): bf17e84
Update app.py
app.py CHANGED
@@ -6,7 +6,7 @@ import math
 from pathlib import Path
 import sys
 sys.path.insert(1, './taming-transformers')
-#from IPython import display
+# from IPython import display
 from base64 import b64encode
 from omegaconf import OmegaConf
 from PIL import Image
@@ -29,6 +29,11 @@ nvidia_smi.nvmlInit()
 handle = nvidia_smi.nvmlDeviceGetHandleByIndex(0)
 # card id 0 hardcoded here, there is also a call to get all available card ids, so we could iterate
 torch.hub.download_url_to_file('https://i.imgur.com/WEHmKef.jpg', 'gpu.jpg')
+
+torch.hub.download_url_to_file('https://images.pexels.com/photos/158028/bellingrath-gardens-alabama-landscape-scenic-158028.jpeg', 'garden.jpeg')
+torch.hub.download_url_to_file('https://images.pexels.com/photos/68767/divers-underwater-ocean-swim-68767.jpeg', 'coralreef.jpeg')
+torch.hub.download_url_to_file('https://images.pexels.com/photos/803975/pexels-photo-803975.jpeg', 'cabin.jpeg')
+
 def sinc(x):
     return torch.where(x != 0, torch.sin(math.pi * x) / (math.pi * x), x.new_ones([]))
 def lanczos(x, a):
@@ -193,9 +198,11 @@ device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
 print('Using device:', device)
 model = load_vqgan_model(args.vqgan_config, args.vqgan_checkpoint).to(device)
 perceptor = clip.load(args.clip_model, jit=False)[0].eval().requires_grad_(False).to(device)
-def inference(text, seed, step_size, max_iterations, width, height):
+def inference(text, seed, step_size, max_iterations, width, height, init_image, init_weight):
     size=[width, height]
     texts = text
+    init_weight=init_weight
+    init_image = init_image.name
     target_images = ""
     max_iterations = max_iterations
     model_names={"vqgan_imagenet_f16_16384": 'ImageNet 16384',"vqgan_imagenet_f16_1024":"ImageNet 1024", 'vqgan_openimages_f16_8192':'OpenImages 8912',
@@ -293,8 +300,8 @@ def inference(text, seed, step_size, max_iterations, width, height):
         losses_str = ', '.join(f'{loss.item():g}' for loss in losses)
         tqdm.write(f'i: {i}, loss: {sum(losses).item():g}, losses: {losses_str}')
         out = synth(z)
-        #TF.to_pil_image(out[0].cpu()).save('progress.png')
-        #display.display(display.Image('progress.png'))
+        # TF.to_pil_image(out[0].cpu()).save('progress.png')
+        # display.display(display.Image('progress.png'))
         res = nvidia_smi.nvmlDeviceGetUtilizationRates(handle)
         print(f'gpu: {res.gpu}%, gpu-mem: {res.memory}%')
     def ascend_txt():
@@ -303,9 +310,9 @@ def inference(text, seed, step_size, max_iterations, width, height):
         iii = perceptor.encode_image(normalize(make_cutouts(out))).float()

         result = []
-        if args.init_weight:
-            # result.append(F.mse_loss(z, z_orig) * args.init_weight / 2)
-            result.append(F.mse_loss(z, torch.zeros_like(z_orig)) * ((1/torch.tensor(i*2 + 1))*args.init_weight) / 2)
+        if init_weight:
+            # result.append(F.mse_loss(z, z_orig) * init_weight / 2)
+            result.append(F.mse_loss(z, torch.zeros_like(z_orig)) * ((1/torch.tensor(i*2 + 1))*init_weight) / 2)
         for prompt in pMs:
             result.append(prompt(iii))
         img = np.array(out.mul(255).clamp(0, 255)[0].cpu().detach().numpy().astype(np.uint8))[:,:,:]
@@ -342,7 +349,7 @@ def load_image( infilename ) :
     img.load()
     data = np.asarray( img, dtype="int32" )
     return data
-def throttled_inference(text, seed, step_size, max_iterations, width, height):
+def throttled_inference(text, seed, step_size, max_iterations, width, height, init_image, init_weight):
     global inferences_running
     current = inferences_running
     if current >= 3:
@@ -351,7 +358,7 @@ def throttled_inference(text, seed, step_size, max_iterations, width, height):
     print(f"Inference starting when we already had {current} running")
     inferences_running += 1
     try:
-        return inference(text, seed, step_size, max_iterations, width, height)
+        return inference(text, seed, step_size, max_iterations, width, height, init_image, init_weight)
     finally:
         print("Inference finished")
         inferences_running -= 1
@@ -366,14 +373,16 @@ gr.Interface(
         gr.inputs.Slider(minimum=25, maximum=150, default=80, label='max iterations', step=1),
         gr.inputs.Slider(minimum=200, maximum=280, default=256, label='width', step=1),
         gr.inputs.Slider(minimum=200, maximum=280, default=256, label='height', step=1),
+        gr.inputs.Image(type="file", label="Initial Image"),
+        gr.inputs.Slider(minimum=0.0, maximum=15.0, default=0.0, label='Initial Weight', step=1.0),
     ],
     gr.outputs.Image(type="numpy", label="Output"),
     title=title,
     description=description,
     article=article,
     examples=[
-        ['a garden by james gurney',42,0.16, 100, 256, 256],
-        ['coral reef city artstationHQ',1000,0.6, 110, 200, 200],
-        ['a cabin in the mountains unreal engine',98,0.3, 120, 280, 280]
+        ['a garden by james gurney',42,0.16, 100, 256, 256, 'garden.jpeg', 0.0],
+        ['coral reef city artstationHQ',1000,0.6, 110, 200, 200, 'coralreef.jpeg', 0.0],
+        ['a cabin in the mountains unreal engine',98,0.3, 120, 280, 280, 'cabin.jpeg', 0.0]
     ]
 ).launch(debug=True)
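The new "Initial Image" input is declared with gr.inputs.Image(type="file"), so inference receives a file object whose .name attribute is its path on disk (hence init_image = init_image.name above). The diff does not show how that path is consumed further down; in typical VQGAN+CLIP code an init image is resized and encoded into the starting latent z roughly as in this sketch. The encode_init_image helper is hypothetical, and model, device, sideX, sideY are assumed to come from the surrounding app.

# Hypothetical sketch: turning an init-image path into a starting latent z,
# in the style of VQGAN+CLIP code. Not part of this commit's diff.
from PIL import Image
import torchvision.transforms.functional as TF

def encode_init_image(path, model, device, sideX, sideY):
    pil_image = Image.open(path).convert('RGB')
    pil_image = pil_image.resize((sideX, sideY), Image.LANCZOS)
    # taming-transformers' VQModel.encode returns (quant, emb_loss, info)
    z, *_ = model.encode(TF.to_tensor(pil_image).to(device).unsqueeze(0) * 2 - 1)
    return z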
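The new init_weight term adds F.mse_loss(z, torch.zeros_like(z_orig)) scaled by ((1/torch.tensor(i*2 + 1))*init_weight) / 2, so the penalty is strongest at iteration 0 and decays as training proceeds. A small standalone check of that schedule in plain Python; the value init_weight = 10.0 is an arbitrary example.

# Coefficient of the init-weight MSE term at iteration i: (1 / (2*i + 1)) * init_weight / 2
init_weight = 10.0  # arbitrary example value
for i in (0, 1, 9, 49, 99):
    coeff = (1 / (2 * i + 1)) * init_weight / 2
    print(f"i={i}: coefficient {coeff:.4f}")
# i=0: 5.0000, i=1: 1.6667, i=9: 0.2632, i=49: 0.0505, i=99: 0.0251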
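throttled_inference gains the same two parameters and forwards them unchanged; the surrounding throttle (only partly visible in the hunks above) is a module-level counter that rejects work once three inferences are already running and always decrements in a finally block. A minimal sketch of that pattern, with a placeholder run_job standing in for the real inference call.

# Sketch of the counter-based throttle; run_job is a placeholder for inference(...).
inferences_running = 0

def throttled(run_job):
    global inferences_running
    if inferences_running >= 3:
        raise RuntimeError("Too many concurrent requests, please retry shortly")
    inferences_running += 1
    try:
        return run_job()
    finally:
        inferences_running -= 1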
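Each row in examples= is passed positionally to throttled_inference, which is why every example gains two trailing values (a local image filename downloaded earlier in the script and an initial weight of 0.0) matching the new init_image and init_weight parameters. A quick illustration of that positional mapping:

# One examples row zipped against the updated parameter order of throttled_inference.
params = ['text', 'seed', 'step_size', 'max_iterations', 'width', 'height', 'init_image', 'init_weight']
row = ['a garden by james gurney', 42, 0.16, 100, 256, 256, 'garden.jpeg', 0.0]
print(dict(zip(params, row)))
# {'text': 'a garden by james gurney', 'seed': 42, 'step_size': 0.16, 'max_iterations': 100,
#  'width': 256, 'height': 256, 'init_image': 'garden.jpeg', 'init_weight': 0.0}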