RuntimeError: Input type (struct c10::Half) and bias type (float) should be the same
- cloned the repo
- created a venv named '.venv'
- activated the venv
- ran pip install -r requirements.txt
- manually installed the missing `spaces` package with `pip install spaces`
- python app.py
- dragged and dropped an image into the Gradio UI
- clicked on 'Submit'
- Massive exception:
Traceback (most recent call last):
File "C:\Users\RandomUser\Documents\VSC_workspaces\BEN2.venv\Lib\site-packages\gradio\queueing.py", line 625, in process_events
response = await route_utils.call_process_api(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\RandomUser\Documents\VSC_workspaces\BEN2.venv\Lib\site-packages\gradio\route_utils.py", line 322, in call_process_api
output = await app.get_blocks().process_api(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\RandomUser\Documents\VSC_workspaces\BEN2.venv\Lib\site-packages\gradio\blocks.py", line 2096, in process_api
result = await self.call_function(
^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\RandomUser\Documents\VSC_workspaces\BEN2.venv\Lib\site-packages\gradio\blocks.py", line 1643, in call_function
prediction = await anyio.to_thread.run_sync( # type: ignore
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\RandomUser\Documents\VSC_workspaces\BEN2.venv\Lib\site-packages\anyio\to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\RandomUser\Documents\VSC_workspaces\BEN2.venv\Lib\site-packages\anyio_backends_asyncio.py", line 2461, in run_sync_in_worker_thread
return await future
^^^^^^^^^^^^
File "C:\Users\RandomUser\Documents\VSC_workspaces\BEN2.venv\Lib\site-packages\anyio_backends_asyncio.py", line 962, in run
result = context.run(func, *args)
^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\RandomUser\Documents\VSC_workspaces\BEN2.venv\Lib\site-packages\gradio\utils.py", line 890, in wrapper
response = f(*args, **kwargs)
^^^^^^^^^^^^^^^^^^
File "C:\Users\RandomUser\Documents\VSC_workspaces\BEN2\app.py", line 47, in fn
result_image = process(im)
^^^^^^^^^^^
File "C:\Users\RandomUser\Documents\VSC_workspaces\BEN2\app.py", line 65, in process
foreground = model.inference(image)
^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\RandomUser\Documents\VSC_workspaces\BEN2\ben_base.py", line 1012, in inference
res = self.forward(img_tensor)
^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\RandomUser\Documents\VSC_workspaces\BEN2.venv\Lib\site-packages\torch\utils_contextlib.py", line 116, in decorate_context
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\RandomUser\Documents\VSC_workspaces\BEN2.venv\Lib\site-packages\torch\amp\autocast_mode.py", line 44, in decorate_autocast
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\RandomUser\Documents\VSC_workspaces\BEN2\ben_base.py", line 929, in forward
shallow_batch = self.shallow(x)
^^^^^^^^^^^^^^^
File "C:\Users\RandomUser\Documents\VSC_workspaces\BEN2.venv\Lib\site-packages\torch\nn\modules\module.py", line 1739, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\RandomUser\Documents\VSC_workspaces\BEN2.venv\Lib\site-packages\torch\nn\modules\module.py", line 1750, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\RandomUser\Documents\VSC_workspaces\BEN2.venv\Lib\site-packages\torch\nn\modules\container.py", line 250, in forward
input = module(input)
^^^^^^^^^^^^^
File "C:\Users\RandomUser\Documents\VSC_workspaces\BEN2.venv\Lib\site-packages\torch\nn\modules\module.py", line 1739, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\RandomUser\Documents\VSC_workspaces\BEN2.venv\Lib\site-packages\torch\nn\modules\module.py", line 1750, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\RandomUser\Documents\VSC_workspaces\BEN2.venv\Lib\site-packages\torch\nn\modules\conv.py", line 554, in forward
return self._conv_forward(input, self.weight, self.bias)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\RandomUser\Documents\VSC_workspaces\BEN2.venv\Lib\site-packages\torch\nn\modules\conv.py", line 549, in _conv_forward
return F.conv2d(
^^^^^^^^^
RuntimeError: Input type (struct c10::Half) and bias type (float) should be the same
I forgot to mention: the Quickstart code works fine:
import torch
from PIL import Image

from ben2 import BEN_Base

# Prefer the GPU when one is available; otherwise run on the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

input_path = "./image.png"  # input image

# Load the pretrained BEN2 weights, move the model to the chosen device,
# and switch to inference mode.
model = BEN_Base.from_pretrained("PramaLLC/BEN2")
model.to(device).eval()

img = Image.open(input_path)
# refine_foreground is an optional postprocessing step: it increases
# inference time but can improve matting edges. It defaults to False.
foreground = model.inference(img, refine_foreground=False)
foreground.save("./foreground.png")
Hello, the problem is likely caused by the Space not using CUDA. Can you check whether the model is being placed on the CUDA device correctly?
# Pick the compute device and report it, falling back to CPU when CUDA is absent.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
print(f"Using device: {device}")