from joblib import Parallel, delayed, cpu_count
from tqdm import tqdm
import logging
from datetime import datetime, timedelta
import os
import yaml
import random
import torch
import numpy as np
from gpustat import GPUStatCollection  # required by get_free_gpu below
import sys
import time
from easydict import EasyDict
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.distributed as dist
import torch.multiprocessing as mp

def get_free_gpu(mode="memory", memory_need=10000) -> list:
    r"""Get free GPUs according to mode (process-free or memory-free).

    Args:
        mode (str, optional): "memory" or "process". Defaults to "memory".
        memory_need (int): The free memory (MB) you need; used if mode == "memory".
            Defaults to 10000.

    Returns:
        list: free GPU ids, sorted by free memory in descending order
    """
    assert mode in ["memory", "process"], "mode must be 'memory' or 'process'"
    if mode == "memory":
        assert memory_need is not None, \
            "'memory' mode requires 'memory_need': the free memory you want to apply for"
        memory_need = int(memory_need)
        assert memory_need > 0, "'memory_need' must be positive"
    gpu_stats = GPUStatCollection.new_query()
    gpu_free_id_list = []
    for idx, gpu_stat in enumerate(gpu_stats):
        if gpu_check_condition(gpu_stat, mode, memory_need):
            gpu_free_id_list.append([idx, gpu_stat.memory_free])
            print("gpu[{}]: {}MB".format(idx, gpu_stat.memory_free))
    if gpu_free_id_list:
        gpu_free_id_list = sorted(gpu_free_id_list,
                                  key=lambda x: x[1],
                                  reverse=True)
        gpu_free_id_list = [i[0] for i in gpu_free_id_list]
    return gpu_free_id_list
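
# Usage sketch (assumption: `gpustat` is installed and at least one NVIDIA GPU
# is visible). Pick the GPU with the most free memory, needing >= 8000 MB:
#   free_ids = get_free_gpu(mode="memory", memory_need=8000)
#   device = torch.device(f"cuda:{free_ids[0]}") if free_ids else torch.device("cpu")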

def gpu_check_condition(gpu_stat, mode, memory_need) -> bool:
    r"""Check whether a GPU is free.

    Args:
        gpu_stat (gpustat.core.GPUStat): the GPU to check
        mode (str): "memory" or "process"
        memory_need (int): the free memory (MB) you need; used if mode == "memory"

    Returns:
        bool: whether the GPU is free
    """
    if mode == "memory":
        return gpu_stat.memory_free > memory_need
    elif mode == "process":
        # A GPU counts as busy if any python process is running on it
        for process in gpu_stat.processes:
            if process["command"] == "python":
                return False
        return True
    else:
        return False

def get_device(gpu_check_list, memory_need=10000):
    # Filter unset entries out of the configured GPU list
    target_gpus = list(filter(lambda x: x is not None, gpu_check_list))
    # assert torch.cuda.device_count() >= len(target_gpus), 'do you set the gpus in config correctly?'
    flag = None
    while True:
        # Get the GPU ids that have more than `memory_need` MB of free memory
        free_gpu_ids = get_free_gpu('memory', memory_need)
        if len(free_gpu_ids) < 1:
            if flag is None:
                print("No GPU available now. Wait or Exit? y/n")
                flag = input()
            if flag.strip() != 'y':
                device = torch.device('cpu')
                print("Using device %s as main device" % device)
                break
            # Wait a minute before polling the GPUs again
            time.sleep(60)
        else:
            free_target_gpu = list(set(free_gpu_ids) & set(target_gpus))
            if len(free_target_gpu) == 0:
                gpuid = free_gpu_ids[0]
                print(f"No target GPU is available, falling back to GPU {gpuid}")
            else:
                gpuid = free_target_gpu[0]
            device = torch.device('cuda:' + str(gpuid))
            print("Using device %s as main device" % device)
            break
    return device
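
# Usage sketch (assumption: the list holds the GPU ids your config allows).
# Polls until an allowed GPU has >= 10 GB free, otherwise offers CPU fallback:
#   device = get_device([0, 1], memory_need=10000)
#   model = model.to(device)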

def get_config_easydict(config_path):
    with open(config_path, 'r') as f:
        config = yaml.safe_load(f)
    return EasyDict(config)

def pmap_multi(pickleable_fn, data, n_jobs=None, verbose=1, desc=None, **kwargs):
    """
    Parallel map using joblib.

    Parameters
    ----------
    pickleable_fn : callable
        Function to map over data.
    data : iterable
        Data over which we want to parallelize the function call. Each element
        is unpacked as positional arguments, so wrap single arguments in a tuple.
    n_jobs : int, optional
        The maximum number of concurrently running jobs. By default, it is one
        less than the number of CPUs.
    verbose : int, optional
        The verbosity level. If nonzero, the function prints progress messages.
        The frequency of the messages increases with the verbosity level. If
        above 10, it reports all iterations. If above 50, it sends the output
        to stdout.
    desc : str, optional
        Description shown on the tqdm progress bar.
    kwargs
        Additional keyword arguments for :attr:`pickleable_fn`.

    Returns
    -------
    list
        The i-th element of the list corresponds to the output of applying
        :attr:`pickleable_fn` to :attr:`data[i]`.
    """
    if n_jobs is None:
        n_jobs = cpu_count() - 1
    results = Parallel(n_jobs=n_jobs, verbose=verbose, timeout=None)(
        delayed(pickleable_fn)(*d, **kwargs) for i, d in tqdm(enumerate(data), desc=desc)
    )
    return results
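
# Usage sketch (`add` is a hypothetical function for illustration only).
# Each tuple in `data` is unpacked into positional arguments; `offset` is
# forwarded through **kwargs:
#   def add(a, b, offset=0):
#       return a + b + offset
#   pmap_multi(add, [(1, 2), (3, 4)], n_jobs=2, desc="adding", offset=10)
#   # -> [13, 17]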

def get_names_from_txt(txt_path):
    with open(txt_path, 'r') as f:
        lines = f.read().strip().split('\n')
    return lines

def get_logger(run_dir, rank=0):
    """
    Set up a logger that writes to both `run_dir/log.txt` and the console.
    """
    if not os.path.exists(run_dir):
        os.makedirs(run_dir)
    logfile_name = os.path.join(run_dir, 'log.txt')
    fmt = "%(asctime)-15s %(levelname)s %(message)s"
    filedatefmt = "%a %d %b %Y %H:%M:%S"
    fileformatter = logging.Formatter(fmt, filedatefmt)
    sdatefmt = "%d %b %H:%M"
    sformatter = logging.Formatter(fmt, sdatefmt)
    fh = logging.FileHandler(logfile_name)
    fh.setLevel(logging.INFO)
    fh.setFormatter(fileformatter)
    sh = logging.StreamHandler()
    sh.setLevel(logging.INFO)
    sh.setFormatter(sformatter)
    logging.basicConfig(level=logging.INFO, handlers=[fh, sh])
    return logging.getLogger()
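
# Usage sketch:
#   logger = get_logger("runs/exp1")
#   logger.info("training started")  # goes to both runs/exp1/log.txt and the console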

def yaml_config_hook(config_file):
    """
    Custom YAML config loader that can include other YAML files (config files
    instead of argparse).
    """
    # Load the YAML files listed in the nested 'defaults' section, which hold
    # default settings for experiments, and merge them into the top-level config.
    with open(config_file, 'rb') as f:
        cfg = yaml.safe_load(f)
    for d in cfg.get("defaults", []):
        config_dir, cf = d.popitem()
        cf = os.path.join(os.path.dirname(config_file), config_dir, cf + ".yaml")
        with open(cf) as f:
            l = yaml.safe_load(f)
        cfg.update(l)
    if "defaults" in cfg.keys():
        del cfg["defaults"]
    return cfg
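
# Usage sketch (hypothetical file layout for illustration). A config like
#   # config/config.yaml
#   defaults:
#     - model: gnn        # merges config/model/gnn.yaml
#     - data: pdbbind     # merges config/data/pdbbind.yaml
#   lr: 1.0e-4
# is flattened into one dict:
#   cfg = yaml_config_hook("config/config.yaml")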

def set_seed(seed=1029):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    # Note: anomaly detection helps debugging but slows down training
    torch.autograd.set_detect_anomaly(True)
    # torch.use_deterministic_algorithms(True)
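
# Usage sketch: call once at startup, before building datasets and models:
#   set_seed(42)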

def ddp_setup(local_rank):
    torch.cuda.set_device(local_rank)
    dist.init_process_group(backend='nccl', timeout=timedelta(seconds=7200))

def init_distributed_mode(args):
    if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ["WORLD_SIZE"])
        # LOCAL_RANK is set by torchrun; fall back to any pre-set args.local_rank
        if "LOCAL_RANK" in os.environ:
            args.local_rank = int(os.environ["LOCAL_RANK"])
    else:
        print("Not using distributed mode")
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.local_rank)
    args.dist_backend = "nccl"
    print(
        "| distributed init (rank {} local rank {}): {}".format(
            args.rank, args.local_rank, "env://"
        ),
        flush=True,
    )
    torch.distributed.init_process_group(
        backend=args.dist_backend, init_method="env://", world_size=args.world_size, rank=args.rank
    )
    torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)
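
# Usage sketch (assumption: launched with torchrun, which sets RANK, WORLD_SIZE
# and LOCAL_RANK in the environment):
#   torchrun --nproc_per_node=4 train.py
# and inside train.py:
#   init_distributed_mode(args)
#   model = DDP(model.cuda(args.local_rank), device_ids=[args.local_rank])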

def setup_for_distributed(is_master):
    """
    Disable printing when not in the master process, unless `force=True`
    is passed to print().
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        force = kwargs.pop("force", False)
        if is_master or force:
            builtin_print(*args, **kwargs)

    __builtin__.print = print
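
# Usage sketch: after setup_for_distributed(False) on a worker rank, the
# patched print stays silent unless forced:
#   print("loss:", loss, force=True)  # emitted even from non-master processes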

def ddp_cleanup():
    dist.destroy_process_group()

def get_run_dir(config):
    # NOTE: both inter_min_neighbors and inter_max_neighbors use the 'igmn' tag;
    # kept as-is so existing run-directory names stay valid
    run_dir = f'{config.root_dir}/{config.data.dataset_name}' \
              f'_{config.target}_model{config.model.model_type}_gtype{config.data.prot_graph_type}' \
              f'_lcut{config.data.ligcut}_pcut{config.data.protcut}_icut{config.data.intercut}_ccut{config.data.chaincut}' \
              f'_pgmn{config.data.prot_max_neighbors}_lgmn{config.data.lig_max_neighbors}' \
              f'_igmn{config.data.inter_min_neighbors}_igmn{config.data.inter_max_neighbors}' \
              f'_test2{config.data.test_2}'
    return run_dir

def get_esm_layer(esm_model_name):
    # Number of transformer layers for each ESM-2 checkpoint
    esm_layer = {'esm2_t48_15B_UR50D': 48,
                 'esm2_t36_3B_UR50D': 36,
                 'esm2_t33_650M_UR50D': 33,
                 'esm2_t30_150M_UR50D': 30,
                 'esm2_t12_35M_UR50D': 12,
                 'esm2_t6_8M_UR50D': 6}
    return esm_layer[esm_model_name]

def get_esm_dim(esm_model_name):
    # Embedding dimension for each ESM-2 checkpoint
    esm_dim = {'esm2_t48_15B_UR50D': 5120,
               'esm2_t36_3B_UR50D': 2560,
               'esm2_t33_650M_UR50D': 1280,
               'esm2_t30_150M_UR50D': 640,
               'esm2_t12_35M_UR50D': 480,
               'esm2_t6_8M_UR50D': 320}
    return esm_dim[esm_model_name]
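
# Usage sketch: the layer count and embedding dimension follow from the ESM-2
# checkpoint name (e.g. esm2_t33_650M_UR50D has 33 layers, 1280-dim embeddings):
#   n_layers = get_esm_layer('esm2_t33_650M_UR50D')  # 33
#   emb_dim = get_esm_dim('esm2_t33_650M_UR50D')     # 1280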