Spaces: Running on Zero

Update app.py
app.py CHANGED

@@ -20,7 +20,6 @@ from huggingface_hub import snapshot_download
 #global tokenizer
 #global noise_scheduler
 device = "cuda:0"
-generator = torch.Generator(device=device)
 
 models_path = snapshot_download(repo_id="Snapchat/w2w")
 
@@ -41,7 +40,7 @@ network = sample_weights(unet, proj, mean, std, v[:, :1000], device, factor = 1.
 # global network
 # unet, _, _, _, _ = load_models(device)
 
-def inference( prompt, negative_prompt, guidance_scale, ddim_steps, seed):
+def inference(prompt, negative_prompt, guidance_scale, ddim_steps, seed):
     #global device
     #global generator
     #global unet
@@ -49,7 +48,7 @@ def inference( prompt, negative_prompt, guidance_scale, ddim_steps, seed):
     #global text_encoder
     #global tokenizer
     #global noise_scheduler
-    generator =
+    generator = torch.Generator(device=device).manual_seed(seed)
     latents = torch.randn(
         (1, unet.in_channels, 512 // 8, 512 // 8),
         generator = generator,
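The substance of the change: the module-level generator, created once at import time, is removed, and inference now builds a fresh torch.Generator seeded with the caller's seed, so the same seed reproduces the same latents on every request. A minimal sketch of that pattern follows; the make_latents helper, the 4-channel latent count, and the 512x512 resolution are illustrative assumptions, not code from this Space:

import torch

device = "cuda:0"  # matches the Space; swap in "cpu" if no GPU is available

def make_latents(seed: int, in_channels: int = 4):
    # A fresh generator seeded per call: no shared state drifts between
    # requests, so a given seed always yields the same noise tensor.
    generator = torch.Generator(device=device).manual_seed(seed)
    return torch.randn(
        (1, in_channels, 512 // 8, 512 // 8),
        generator=generator,
        device=device,
    )

# Same seed, same latents: the reproducibility the commit restores.
assert torch.equal(make_latents(42), make_latents(42))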