glenn-jocher committed on
Commit
29d79a6
·
unverified ·
1 Parent(s): da2ee39

Do not prefer Apple MPS (#8446)

Browse files

Require explicit request for MPS, i.e.
```bash
python detect.py --device mps
```

Reverts https://github.com/ultralytics/yolov5/pull/8210, which made MPS the preferred device when available.

Note that torch MPS is experiencing ongoing compatibility issues in https://github.com/pytorch/pytorch/issues/77886

Files changed (1) hide show
  1. utils/torch_utils.py +2 -2
utils/torch_utils.py CHANGED
@@ -62,7 +62,7 @@ def select_device(device='', batch_size=0, newline=True):
62
  assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \
63
  f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)"
64
 
65
- if not cpu and torch.cuda.is_available(): # prefer GPU if available
66
  devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7
67
  n = len(devices) # device count
68
  if n > 1 and batch_size > 0: # check batch_size is divisible by device_count
@@ -72,7 +72,7 @@ def select_device(device='', batch_size=0, newline=True):
72
  p = torch.cuda.get_device_properties(i)
73
  s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\n" # bytes to MB
74
  arg = 'cuda:0'
75
- elif not cpu and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available(): # prefer MPS if available
76
  s += 'MPS\n'
77
  arg = 'mps'
78
  else: # revert to CPU
 
62
  assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \
63
  f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)"
64
 
65
+ if not (cpu or mps) and torch.cuda.is_available(): # prefer GPU if available
66
  devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7
67
  n = len(devices) # device count
68
  if n > 1 and batch_size > 0: # check batch_size is divisible by device_count
 
72
  p = torch.cuda.get_device_properties(i)
73
  s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\n" # bytes to MB
74
  arg = 'cuda:0'
75
+ elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available(): # prefer MPS if available
76
  s += 'MPS\n'
77
  arg = 'mps'
78
  else: # revert to CPU