Update app.py
app.py CHANGED
@@ -20,9 +20,7 @@ global vae
     global text_encoder
     global tokenizer
     global noise_scheduler
-
-    global pointy_val
-    global bags_val
+
     device = "cuda:0"
     generator = torch.Generator(device=device)

@@ -181,19 +179,10 @@ def edit_inference(prompt, negative_prompt, guidance_scale, ddim_steps, seed, st


 def sample_then_run():
-
-    global pointy_val
-    global bags_val
-    global young
-    global pointy
-    global bags
+

     sample_model()

-    young_val = network.proj@young[0]/(torch.norm(young)**2).item()
-    pointy_val = network.proj@pointy[0]/(torch.norm(pointy)**2).item()
-    bags_val = network.proj@bags[0]/(torch.norm(bags)**2).item()
-
     prompt = "sks person"
     negative_prompt = "low quality, blurry, unfinished, cartoon"
     seed = 5
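Note: the block dropped from sample_then_run() projected network.proj onto the attribute directions young, pointy, and bags to obtain scalar coefficients. Below is a minimal stand-alone sketch of that projection; the tensor shapes and the stand-in tensors are illustrative assumptions only, not the actual objects defined in app.py.

# Minimal sketch of the projection performed by the removed lines, assuming
# network.proj is a (1, D) weight tensor and each attribute direction is a
# (1, D) tensor. D and the random tensors below are stand-ins.
import torch

D = 10_000
proj = torch.randn(1, D)    # stand-in for network.proj
young = torch.randn(1, D)   # stand-in for the "young" attribute direction

# Scalar coefficient of proj along the direction: <proj, d> / ||d||^2
young_val = proj @ young[0] / (torch.norm(young) ** 2).item()
print(young_val.shape)  # torch.Size([1])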