Commit b815b4e · 1 Parent(s): deb41ea
Update app.py
app.py CHANGED
@@ -12,6 +12,7 @@ from torchvision import transforms as tfms
 from tqdm.auto import tqdm
 from transformers import CLIPTextModel, CLIPTokenizer, logging
 import os
+MY_TOKEN=os.environ.get('Stable_Diffusion')
 import cv2
 import torchvision.transforms as T
 
@@ -21,7 +22,7 @@ logging.set_verbosity_error()
 torch_device = "cuda" if torch.cuda.is_available() else "cpu"
 
 # Load the autoencoder
-vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder=…
+vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae",use_auth_token=MY_TOKEN)
 
 # Load tokenizer and text encoder to tokenize and encode the text
 tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
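The two additions above are what make the gated CompVis checkpoint downloadable: a token is read from the Stable_Diffusion environment variable and passed to from_pretrained. A minimal sketch of the same pattern, with a guard for the unset-variable case (the guard is an editorial addition, not part of the commit):

import os
from diffusers import AutoencoderKL

# Read the access token from the environment, as the commit does.
# os.environ.get returns None when 'Stable_Diffusion' is not set,
# so fail early with a clear message instead of a download error.
MY_TOKEN = os.environ.get('Stable_Diffusion')
if MY_TOKEN is None:
    raise RuntimeError("Set the Stable_Diffusion env var to a Hugging Face token")

# use_auth_token is the parameter name in the diffusers version this app
# targets; newer diffusers releases accept `token` instead.
vae = AutoencoderKL.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    subfolder="vae",
    use_auth_token=MY_TOKEN,
)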
@@ -38,9 +39,9 @@ vae = vae.to(torch_device)
 text_encoder = text_encoder.to(torch_device)
 unet = unet.to(torch_device)
 
-style_files = ['…
-'…
-'…
+style_files = ['bird_style.bin', 'ronaldo.bin',
+               'pop_art.bin', 'threestooges.bin',
+               'bflan.bin']
 
 images_without_loss = []
 images_with_loss = []
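The .bin filenames in style_files, together with the placeholder tokens in titles below (e.g. <birb-style>), indicate textual-inversion concept embeddings. The loading code falls outside these hunks; the sketch below shows the usual pattern, assuming each .bin stores a single token-to-embedding pair (an assumption, not confirmed by this commit):

import torch

def load_style_embedding(embed_path, tokenizer, text_encoder):
    # Assumed layout: {"<placeholder-token>": tensor} -- the common
    # textual-inversion format.
    learned = torch.load(embed_path, map_location="cpu")
    token, embedding = next(iter(learned.items()))

    # Register the placeholder token and grow the embedding matrix.
    tokenizer.add_tokens(token)
    text_encoder.resize_token_embeddings(len(tokenizer))

    # Copy the learned vector into the new token's row.
    token_id = tokenizer.convert_tokens_to_ids(token)
    text_encoder.get_input_embeddings().weight.data[token_id] = embedding

for f in style_files:
    load_style_embedding(f, tokenizer, text_encoder)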
@@ -251,7 +252,7 @@ def display_images_in_rows(images_with_titles, titles):
 # plt.show()
 
 
-def image_generator(prompt = "snoopy", loss_function=None):
+def image_generator(prompt = "sky", loss_function=None):
     images_without_loss = []
     images_with_loss = []
     if loss_function == "Yes":
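loss_function is compared against the literal string "Yes", which suggests a UI control such as a Gradio Radio feeding the function. The interface code is outside these hunks, so the wiring below is purely hypothetical, shown only to explain the string comparison:

import gradio as gr

# Hypothetical wiring (not in this commit): a Radio passes the literal
# strings "Yes"/"No" into loss_function, and the (image, title) pairs
# that image_generator builds match gr.Gallery's expected input format.
demo = gr.Interface(
    fn=image_generator,
    inputs=[
        gr.Textbox(value="sky", label="Prompt"),
        gr.Radio(["Yes", "No"], label="Apply loss function"),
    ],
    outputs=gr.Gallery(label="Generated styles"),
)
demo.launch()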
@@ -267,7 +268,7 @@ def image_generator(prompt = "snoopy", loss_function=None):
         images_with_loss.append(generated_img)
 
     generated_sd_images = []
-    titles = ["…
+    titles = ["<birb-style>", "'<ronaldo>", "<pop-art>", "<threestooges>", "<Marbled-painting>"]
 
     for i in range(len(titles)):
         generated_sd_images.append((images_without_loss[i], titles[i]))
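One caveat in the final hunk: the index loop assumes images_without_loss holds at least as many entries as titles, so a style that fails to generate raises an IndexError. A safer equivalent (an editorial suggestion, not part of the commit):

# zip() stops at the shorter sequence, so a missing image cannot
# raise an IndexError the way titles[i] indexing can.
generated_sd_images = list(zip(images_without_loss, titles))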