# utils/version_info.py
import subprocess
import os
import sys
import gc
import gradio as gr
# Git executable to invoke; override via the GIT environment variable.
git = os.environ.get('GIT', "git")


def commit_hash():
    """Return the git HEAD commit hash of the working tree, or "<none>" on any failure."""
    try:
        out = subprocess.check_output([git, "rev-parse", "HEAD"], shell=False, encoding='utf8')
        return out.strip()
    except Exception:
        # Not a git checkout, git missing, etc. — report a sentinel instead of raising.
        return "<none>"
def get_xformers_version():
    """Return the installed xformers version string, or "<none>" if it cannot be imported."""
    try:
        import xformers as _xf
        return _xf.__version__
    except Exception:
        return "<none>"
def get_transformers_version():
    """Return the installed transformers version string, or "<none>" if unavailable."""
    try:
        import transformers as _tf
    except Exception:
        return "<none>"
    # Missing __version__ attribute maps to the same sentinel the import failure uses.
    return getattr(_tf, "__version__", "<none>")
def get_accelerate_version():
    """Return the installed accelerate version string, or "<none>" if unavailable."""
    try:
        import accelerate as _acc
        return _acc.__version__
    except Exception:
        return "<none>"
def get_safetensors_version():
    """Return the installed safetensors version string, or "<none>" if unavailable."""
    try:
        import safetensors as _st
        return _st.__version__
    except Exception:
        return "<none>"
def get_diffusers_version():
    """Return the installed diffusers version string, or "<none>" if unavailable."""
    try:
        import diffusers as _df
        return _df.__version__
    except Exception:
        return "<none>"
def get_torch_info():
    """Collect torch/CUDA diagnostic strings for display.

    Returns:
        A list of strings: torch version, CUDA runtime version, availability,
        flash-attention status, per-device details (only when CUDA is available),
        and the CUDA-related environment variables. Returns the string "<none>"
        only if gathering the information fails unexpectedly.
    """
    from torch import __version__ as torch_version_, version, cuda, backends
    initialize_cuda()
    try:
        info = [
            torch_version_,
            f"CUDA Version:{version.cuda}",
            f"Available:{cuda.is_available()}",
            f"flash attention enabled: {backends.cuda.flash_sdp_enabled()}",
        ]
        if cuda.is_available():
            # Per-device queries raise on CPU-only hosts; only ask when a GPU exists.
            info += [
                f"Capabilities: {cuda.get_device_capability(0)}",
                f"Device Name: {cuda.get_device_name(0)}",
                f"Device Count: {cuda.device_count()}",
            ]
        # os.environ.get avoids a KeyError (which previously threw away the
        # entire report) when these variables are not set.
        info += [
            f"Devices: {os.environ.get('CUDA_VISIBLE_DEVICES', '<not set>')}",
            f"Zero :{os.environ.get('CUDA_MODULE_LOADING', '<not set>')}",
        ]
        return info
    except Exception:
        return "<none>"
def release_torch_resources():
    """Release cached GPU memory held by torch, then run Python garbage collection.

    Safe to call on CPU-only hosts: the CUDA cache operations are skipped
    entirely when no CUDA device is available.
    """
    from torch import cuda
    if cuda.is_available():
        # Return cached allocator blocks to the driver and reclaim IPC memory
        # left behind by dead producer processes.
        cuda.empty_cache()
        cuda.ipc_collect()
    # Reclaim unreferenced Python objects (may release tensors' last references).
    gc.collect()
def initialize_cuda():
    """Detect the compute device, log it, and return its name.

    Returns:
        "cuda" if a CUDA-capable GPU is available, otherwise "cpu".

    Note: the original code also built an unused ``torch.cuda.device`` object —
    including ``cuda.device("cpu")``, which is invalid since torch.cuda.device
    is a context manager for CUDA devices only. Those dead assignments are removed.
    """
    from torch import cuda, version
    if cuda.is_available():
        print(f"CUDA is available. Using device: {cuda.get_device_name(0)} with CUDA version: {version.cuda}")
        return "cuda"
    print("CUDA is not available. Using CPU.")
    return "cpu"
def versions_html():
    """Build an HTML fragment summarizing runtime versions.

    Returns:
        An HTML string listing the app commit, python/torch/diffusers/
        transformers/safetensors/gradio versions, a dark-mode toggle link,
        and the full GPU info from get_torch_info().
    """
    from torch import __version__ as torch_version_

    python_version = ".".join([str(x) for x in sys.version_info[0:3]])
    commit = commit_hash()
    # Compute the displayed commit once (previously this conditional was
    # duplicated inline in the f-string): fall back to "huggingface" when
    # no git checkout is present.
    short_commit = "huggingface" if commit == "<none>" else commit

    # Dark-mode toggle link with inline JavaScript.
    toggle_dark_link = '''
    <a href="#" onclick="document.body.classList.toggle('dark'); return false;" style="cursor: pointer; text-decoration: underline; color: #1a0dab;">
        Toggle Dark Mode
    </a>
    '''

    v_html = f"""
        version: <a href="https://huggingface.co/spaces/Surn/HexaGrid/commit/{short_commit}" target="_blank">{short_commit}</a>
        &#x2000;•&#x2000;
        python: <span title="{sys.version}">{python_version}</span>
        &#x2000;•&#x2000;
        torch: {torch_version_}
        &#x2000;•&#x2000;
        diffusers: {get_diffusers_version()}
        &#x2000;•&#x2000;
        transformers: {get_transformers_version()}
        &#x2000;•&#x2000;
        safetensors: {get_safetensors_version()}
        &#x2000;•&#x2000;
        gradio: {gr.__version__}
        &#x2000;•&#x2000;
        {toggle_dark_link}
        <br>
        Full GPU Info:{get_torch_info()}
        """
    return v_html