RuntimeError: The shape of the 2D attn_mask is torch.Size([77, 77]), but should be (1, 1).
Hello,
Could you please explain why I encounter this error on both Linux and Windows? The model loads into VRAM via CUDA successfully before the failure occurs:
Traceback (most recent call last):
File "C:\Users\Win\Desktop\T2V-Turbo.venv\lib\site-packages\gradio\queueing.py", line 541, in process_events
response = await route_utils.call_process_api(
File "C:\Users\Win\Desktop\T2V-Turbo.venv\lib\site-packages\gradio\route_utils.py", line 276, in call_process_api
output = await app.get_blocks().process_api(
File "C:\Users\Win\Desktop\T2V-Turbo.venv\lib\site-packages\gradio\blocks.py", line 1928, in process_api
result = await self.call_function(
File "C:\Users\Win\Desktop\T2V-Turbo.venv\lib\site-packages\gradio\blocks.py", line 1514, in call_function
prediction = await anyio.to_thread.run_sync(
File "C:\Users\Win\Desktop\T2V-Turbo.venv\lib\site-packages\anyio\to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(
File "C:\Users\Win\Desktop\T2V-Turbo.venv\lib\site-packages\anyio_backends_asyncio.py", line 2177, in run_sync_in_worker_thread
return await future
File "C:\Users\Win\Desktop\T2V-Turbo.venv\lib\site-packages\anyio_backends_asyncio.py", line 859, in run
result = context.run(func, *args)
File "C:\Users\Win\Desktop\T2V-Turbo.venv\lib\site-packages\gradio\utils.py", line 833, in wrapper
response = f(*args, **kwargs)
File "C:\Users\Win\Desktop\T2V-Turbo.venv\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "C:\Users\Win\Desktop\T2V-Turbo\app.py", line 79, in generate
result = pipeline(
File "C:\Users\Win\Desktop\T2V-Turbo.venv\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "C:\Users\Win\Desktop\T2V-Turbo\pipeline\t2v_turbo_vc2_pipeline.py", line 155, in call
prompt_embeds = self._encode_prompt(
File "C:\Users\Win\Desktop\T2V-Turbo\pipeline\t2v_turbo_vc2_pipeline.py", line 56, in _encode_prompt
prompt_embeds = self.text_encoder(prompt)
File "C:\Users\Win\Desktop\T2V-Turbo.venv\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\Win\Desktop\T2V-Turbo.venv\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\Win\Desktop\T2V-Turbo\lvdm\modules\encoders\condition.py", line 260, in forward
z = self.encode_with_transformer(tokens.to(self.device))
File "C:\Users\Win\Desktop\T2V-Turbo\lvdm\modules\encoders\condition.py", line 267, in encode_with_transformer
x = self.text_transformer_forward(x, attn_mask=self.model.attn_mask)
File "C:\Users\Win\Desktop\T2V-Turbo\lvdm\modules\encoders\condition.py", line 282, in text_transformer_forward
x = r(x, attn_mask=attn_mask)
File "C:\Users\Win\Desktop\T2V-Turbo.venv\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\Win\Desktop\T2V-Turbo.venv\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\Win\Desktop\T2V-Turbo.venv\lib\site-packages\open_clip\transformer.py", line 263, in forward
x = q_x + self.ls_1(self.attention(q_x=self.ln_1(q_x), k_x=k_x, v_x=v_x, attn_mask=attn_mask))
File "C:\Users\Win\Desktop\T2V-Turbo.venv\lib\site-packages\open_clip\transformer.py", line 250, in attention
return self.attn(
File "C:\Users\Win\Desktop\T2V-Turbo.venv\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\Win\Desktop\T2V-Turbo.venv\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\Win\Desktop\T2V-Turbo.venv\lib\site-packages\torch\nn\modules\activation.py", line 1266, in forward
attn_output, attn_output_weights = F.multi_head_attention_forward(
File "C:\Users\Win\Desktop\T2V-Turbo.venv\lib\site-packages\torch\nn\functional.py", line 5382, in multi_head_attention_forward
raise RuntimeError(f"The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}.")
RuntimeError: The shape of the 2D attn_mask is torch.Size([77, 77]), but should be (1, 1).
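For what it's worth, the bottom frame shows that the 77x77 causal mask built for CLIP's 77-token context reaches an attention layer that thinks the sequence length is 1. The sketch below reproduces the identical error under one assumption (not confirmed by the traceback): a newer open_clip constructs its MultiheadAttention with batch_first=True, while lvdm's encode_with_transformer still permutes the embeddings to (seq, batch, dim). The width and head counts are the usual ViT-H text-tower values, not values read from the traceback.

```python
import torch
import torch.nn as nn

d_model, n_head, ctx_len = 1024, 16, 77  # assumed ViT-H/14 text-tower sizes

# Newer open_clip releases build the block's attention with batch_first=True.
attn = nn.MultiheadAttention(d_model, n_head, batch_first=True)

# lvdm permutes the token embeddings to LND: (77, 1, 1024).
x = torch.randn(ctx_len, 1, d_model)

# open_clip's causal mask for the 77-token context.
attn_mask = torch.full((ctx_len, ctx_len), float("-inf")).triu_(1)

# With batch_first=True the input is read as (batch=77, seq=1, dim), so the
# expected 2D mask size becomes (1, 1) and this call raises:
# RuntimeError: The shape of the 2D attn_mask is torch.Size([77, 77]),
# but should be (1, 1).
attn(x, x, x, attn_mask=attn_mask, need_weights=False)
```

If that assumption holds, pinning open_clip_torch to the version listed in the repo's requirements.txt (older releases keep batch_first=False) should make the shapes line up again.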