Update model.py

model.py CHANGED
```diff
@@ -114,10 +114,10 @@ class Model:
         condition_img = condition_img.resize((512,512))
         W, H = condition_img.size
 
-        condition_img = torch.from_numpy(np.array(condition_img)).unsqueeze(0).permute(0,3,1,2).repeat(
+        condition_img = torch.from_numpy(np.array(condition_img)).unsqueeze(0).permute(0,3,1,2).repeat(3,1,1,1)
         condition_img = condition_img.to(self.device)
         condition_img = 2*(condition_img/255 - 0.5)
-        prompts = [prompt] *
+        prompts = [prompt] * 3
         caption_embs, emb_masks = self.t5_model.get_text_embeddings(prompts)
 
         print(f"processing left-padding...")
```
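The removed lines are cut off in the page capture, so the previous `repeat(...)` count and prompt multiplier are not visible; the new code batches the condition image and the prompt three ways, one per generated sample. A minimal, self-contained sketch of that preprocessing step (the helper name and the stand-in input are assumptions, not the Space's code):

```python
import numpy as np
import torch
from PIL import Image

def preprocess_condition(img: Image.Image, prompt: str, batch_size: int = 3):
    """Resize, normalize to [-1, 1], and batch a condition image; hypothetical helper."""
    img = img.convert("RGB").resize((512, 512))
    # HWC uint8 -> 1xCxHxW float, then repeat along the batch dimension
    t = torch.from_numpy(np.array(img)).unsqueeze(0).permute(0, 3, 1, 2).float()
    t = t.repeat(batch_size, 1, 1, 1)   # mirrors the new repeat(3,1,1,1)
    t = 2 * (t / 255 - 0.5)             # scale [0, 255] -> [-1, 1]
    prompts = [prompt] * batch_size     # mirrors the new [prompt] * 3
    return t, prompts

cond, prompts = preprocess_condition(Image.new("RGB", (640, 480)), "a test prompt")
print(cond.shape, len(prompts))  # torch.Size([3, 3, 512, 512]) 3
```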
```diff
@@ -160,7 +160,7 @@ class Model:
 
         samples = torch.cat((condition_img[0:1], samples), dim=0)
         samples = 255 * (samples * 0.5 + 0.5)
-        samples = [
+        samples = [
             Image.fromarray(
                 sample.permute(1, 2, 0).cpu().detach().numpy().clip(
                     0, 255).astype(np.uint8)) for sample in samples
```
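Only the list-comprehension header is marked as changed in this hunk, and its old and new forms render identically in the capture (most likely a whitespace or line-wrapping tweak). The conversion itself turns each CxHxW float sample, already mapped back to the [0, 255] range, into a PIL image; a hedged stand-alone sketch using a random stand-in batch:

```python
import numpy as np
import torch
from PIL import Image

# Stand-in for generator output in [-1, 1], denormalized to [0, 255]
samples = 255 * (torch.randn(3, 3, 512, 512) * 0.5 + 0.5)
images = [
    Image.fromarray(
        s.permute(1, 2, 0).cpu().detach().numpy().clip(0, 255).astype(np.uint8))
    for s in samples
]
print(len(images), images[0].size)  # 3 (512, 512)
```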
```diff
@@ -204,10 +204,10 @@ class Model:
         condition_img = condition_img.resize((512,512))
         W, H = condition_img.size
 
-        condition_img = torch.from_numpy(np.array(condition_img)).unsqueeze(0).permute(0,3,1,2).repeat(
+        condition_img = torch.from_numpy(np.array(condition_img)).unsqueeze(0).permute(0,3,1,2).repeat(3,1,1,1)
         condition_img = condition_img.to(self.device)
         condition_img = 2*(condition_img/255 - 0.5)
-        prompts = [prompt] *
+        prompts = [prompt] * 3
         caption_embs, emb_masks = self.t5_model.get_text_embeddings(prompts)
 
         print(f"processing left-padding...")
@@ -250,7 +250,7 @@ class Model:
         samples = samples.cpu()
         samples = torch.cat((condition_img[0:1], samples), dim=0)
         samples = 255 * (samples * 0.5 + 0.5)
-        samples = [
+        samples = [
             Image.fromarray(
                 sample.permute(1, 2, 0).cpu().detach().numpy().clip(0, 255).astype(np.uint8))
             for sample in samples
```
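Both output hunks prepend the first row of the repeated condition tensor to the generated batch before denormalizing, so the returned gallery shows the input alongside the three samples. A shape-only sketch of that assembly (tensor contents are placeholders, not real model output):

```python
import torch

condition_img = torch.zeros(3, 3, 512, 512)   # repeated condition, in [-1, 1]
samples = torch.rand(3, 3, 512, 512) * 2 - 1  # stand-in generator output
samples = torch.cat((condition_img[0:1], samples), dim=0)
samples = 255 * (samples * 0.5 + 0.5)
print(samples.shape)  # torch.Size([4, 3, 512, 512]): condition + 3 samples
```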