Spaces: Running on Zero
from functools import wraps
import torch
from huggingface_hub import HfApi
import os
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class DeviceManager:
    """Singleton that resolves and caches the torch device this Space should use."""

    _instance = None

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super(DeviceManager, cls).__new__(cls)
            cls._instance._initialized = False
        return cls._instance

    def __init__(self):
        if self._initialized:
            return
        self._initialized = True
        self._current_device = None
    def check_zero_gpu_availability(self):
        """Best-effort check of whether this Space runs on ZeroGPU hardware."""
        try:
            if 'SPACE_ID' in os.environ:
                api = HfApi()
                space_info = api.get_space_runtime(os.environ['SPACE_ID'])
                # SpaceRuntime.hardware is a string/enum value (e.g. "zero-a10g"),
                # not a dict, so match on the "zero" prefix rather than .get().
                hardware = getattr(space_info, 'hardware', None)
                if hardware is not None and str(hardware).startswith('zero'):
                    return True
        except Exception as e:
            logger.warning(f"Error checking ZeroGPU availability: {e}")
        return False
    def get_optimal_device(self):
        """Resolve the device once and cache it: CUDA on ZeroGPU, otherwise CPU."""
        if self._current_device is None:
            if self.check_zero_gpu_availability():
                try:
                    # torch.device('cuda') alone never raises, so verify CUDA is
                    # actually usable before committing to it.
                    if not torch.cuda.is_available():
                        raise RuntimeError("CUDA is not available")
                    self._current_device = torch.device('cuda')
                    logger.info("Using ZeroGPU")
                except Exception:
                    self._current_device = torch.device('cpu')
                    logger.info("Failed to use ZeroGPU, falling back to CPU")
            else:
                self._current_device = torch.device('cpu')
                logger.info("Using CPU")
        return self._current_device
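

# A minimal usage sketch (not part of the original file), assuming only the class
# defined above: callers fetch the singleton once and move tensors or models to
# whatever device it resolved.
if __name__ == "__main__":
    device = DeviceManager().get_optimal_device()
    logger.info(f"Running on device: {device}")
    x = torch.zeros(2, 2, device=device)
    print(x.device)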