Commit af5f9f5
Parent(s): b8dbce6

Update app.py

Changed the auth token
app.py CHANGED
@@ -28,12 +28,12 @@ logging.set_verbosity_error()
 # Set device
 torch_device = "cuda" if torch.cuda.is_available() else "cpu"
 
-import os
-MY_TOKEN=os.environ.get('Learning')
+#import os
+#MY_TOKEN=os.environ.get('Learning')
 
 
 # Load the autoencoder model which will be used to decode the latents into image space.
-vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae",use_auth_token=MY_TOKEN)
+vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae") #,use_auth_token=MY_TOKEN)
 
 # Load the tokenizer and text encoder to tokenize and encode the text.
 tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
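For context, the pattern this commit disables is loading a checkpoint with a Hugging Face access token read from an environment variable. Below is a minimal sketch of that pattern, assuming a variable named HF_TOKEN (the original code read one named 'Learning', which is not a conventional token variable):

import os

from diffusers import AutoencoderKL

# os.environ.get returns None when the variable is unset; from_pretrained then
# falls back to any cached login credentials, or anonymous access otherwise.
MY_TOKEN = os.environ.get("HF_TOKEN")

# use_auth_token is the keyword this repo's code passes; newer diffusers
# releases rename it to `token`.
vae = AutoencoderKL.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    subfolder="vae",
    use_auth_token=MY_TOKEN,
)

Since CompVis/stable-diffusion-v1-4 is publicly accessible, dropping the token (as this commit does) still works; a token is only required for gated or private repositories.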