Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -101,13 +101,13 @@ def inference( prompt, negative_prompt, guidance_scale, ddim_steps, seed):
 
     text_input = tokenizer.value(prompt, padding="max_length", max_length=tokenizer.value.model_max_length, truncation=True, return_tensors="pt")
 
-    text_embeddings = text_encoder.value(text_input.input_ids.to(device))[0]
+    text_embeddings = text_encoder.value(text_input.input_ids.to(device.value))[0]
 
     max_length = text_input.input_ids.shape[-1]
     uncond_input = tokenizer.value(
         [negative_prompt], padding="max_length", max_length=max_length, return_tensors="pt"
     )
-    uncond_embeddings = text_encoder.value(uncond_input.input_ids.to(device))[0]
+    uncond_embeddings = text_encoder.value(uncond_input.input_ids.to(device.value))[0]
     text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
     noise_scheduler.value.set_timesteps(ddim_steps)
     latents = latents * noise_scheduler.value.init_noise_sigma
@@ -141,7 +141,7 @@ def edit_inference(prompt, negative_prompt, guidance_scale, ddim_steps, seed, st
     #pad to same number of PCs
     pcs_original = original_weights.shape[1]
     pcs_edits = young.value.shape[1]
-    padding = torch.zeros((1,pcs_original-pcs_edits)).to(device)
+    padding = torch.zeros((1,pcs_original-pcs_edits)).to(device.value)
     young_pad = torch.cat((young.value, padding), 1)
     pointy_pad = torch.cat((pointy.value, padding), 1)
     wavy_pad = torch.cat((wavy.value, padding), 1)
@@ -160,13 +160,13 @@ def edit_inference(prompt, negative_prompt, guidance_scale, ddim_steps, seed, st
 
     text_input = tokenizer.value(prompt, padding="max_length", max_length=tokenizer.value.model_max_length, truncation=True, return_tensors="pt")
 
-    text_embeddings = text_encoder.value(text_input.input_ids.to(device))[0]
+    text_embeddings = text_encoder.value(text_input.input_ids.to(device.value))[0]
 
     max_length = text_input.input_ids.shape[-1]
     uncond_input = tokenizer.value(
         [negative_prompt], padding="max_length", max_length=max_length, return_tensors="pt"
     )
-    uncond_embeddings = text_encoder.value(uncond_input.input_ids.to(device))[0]
+    uncond_embeddings = text_encoder.value(uncond_input.input_ids.to(device.value))[0]
     text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
     noise_scheduler.value.set_timesteps(ddim_steps)
     latents = latents * noise_scheduler.value.init_noise_sigma
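
The change replaces every `.to(device)` call with `.to(device.value)`. A minimal sketch of the assumed pattern follows: judging from the surrounding `tokenizer.value` and `text_encoder.value` usage, this Space appears to keep shared objects in `gr.State` wrappers, so the stored object has to be unwrapped via `.value` before being passed to `torch.Tensor.to`. Only the names `device`, `tokenizer`, and `text_encoder` come from the diff; the rest of the snippet is illustrative.

# Sketch only, assuming `device` is a gr.State holding a torch device string,
# mirroring how tokenizer/text_encoder are accessed via `.value` in the diff.
import gradio as gr
import torch

device = gr.State("cuda" if torch.cuda.is_available() else "cpu")

ids = torch.ones(1, 77, dtype=torch.long)  # stand-in for text_input.input_ids

# ids = ids.to(device)       # would fail: `device` is the gr.State wrapper, not a device
ids = ids.to(device.value)   # works: unwrap the stored value first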