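"""Download a model, dataset, or single file and push it to the Hugging Face Hub.

Which action runs depends on the arguments that are set:
  - model_name_or_path:   download the model + tokenizer, then push both to `hub_name`
  - dataset_name_or_path: load the dataset (from the Hub, raw files, or a save_to_disk directory), then push it to `hub_name`
  - file:                 upload a single file into the dataset repo `hub_name`
"""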
from pathlib import Path
from datasets import load_dataset, load_from_disk
from dataclasses import dataclass, field
from huggingface_hub import HfApi
from transformers import AutoModel, AutoTokenizer, HfArgumentParser
from typing import Optional, List
@dataclass
class DownloadArgs:
model_cache_dir: str = field(
default='/share/LMs',
metadata={'help': 'Default path to save language models'}
)
model_name_or_path: Optional[str] = field(
default=None,
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
)
dataset_cache_dir: str = field(
default='/share/peitian/Data/Datasets/huggingface',
metadata={'help': 'Default path to save huggingface datasets'}
)
dataset_name_or_path: Optional[str] = field(
default=None,
metadata={'help': 'Dataset name'}
)
data_files: Optional[dict] = field(
default=None,
metadata={'help': 'Data files for json dataset.'}
)
dataset_from_disk: bool = field(
default=False,
metadata={'help': 'Load dataset from disk?'}
)
file: Optional[str] = field(
default=None,
metadata={'help': 'File to upload.'}
)
file_in_repo: Optional[str] = field(
default=None,
metadata={'help': 'File name in repository.'}
)
hub_name: Optional[str] = field(
default=None,
metadata={'help': 'Name of the huggingface repo.'}
)
    revision: Optional[str] = field(
        default=None,
        metadata={'help': 'Remote code revision'}
    )
resume_download: bool = field(
default=True,
metadata={'help': 'Resume downloading'}
)
    def __post_init__(self):
        # All the work happens here, so simply parsing the arguments runs the requested action.
        if self.model_name_or_path is not None:
            # Download the tokenizer and model (reusing the cache under model_cache_dir if present).
            tokenizer = AutoTokenizer.from_pretrained(self.model_name_or_path, cache_dir=self.model_cache_dir, trust_remote_code=True, revision=self.revision, resume_download=self.resume_download)
            model = AutoModel.from_pretrained(self.model_name_or_path, cache_dir=self.model_cache_dir, trust_remote_code=True, revision=self.revision, resume_download=self.resume_download)
            # Retry in a loop so a transient network error does not abort the upload.
            while True:
                try:
                    tokenizer.push_to_hub(self.hub_name)
                    break
                except Exception:
                    pass
            while True:
                try:
                    model.push_to_hub(self.hub_name)
                    break
                except Exception:
                    pass
        if self.dataset_name_or_path is not None:
            # Load the dataset either from a save_to_disk directory or via load_dataset (Hub name or raw data files).
            if self.dataset_from_disk:
dataset = load_from_disk(self.dataset_name_or_path)
else:
dataset = load_dataset(self.dataset_name_or_path, data_files=self.data_files, cache_dir=self.dataset_cache_dir)
            # Retry in a loop so a transient network error does not abort the upload.
            while True:
                try:
                    dataset.push_to_hub(self.hub_name)
                    break
                except Exception:
                    pass
        if self.file is not None:
            # Upload a single file into the dataset repo; default to the local file name if no target name is given.
            api = HfApi()
            if self.file_in_repo is None:
                self.file_in_repo = Path(self.file).name
api.upload_file(
path_or_fileobj=self.file,
path_in_repo=self.file_in_repo,
repo_id=self.hub_name,
repo_type="dataset",
)
if __name__ == "__main__":
    parser = HfArgumentParser([DownloadArgs])
    # Parsing triggers DownloadArgs.__post_init__, which performs the requested download/upload.
    args, = parser.parse_args_into_dataclasses()
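    # Example invocations (the script name and repo ids below are illustrative):
    #   python download.py --model_name_or_path bert-base-uncased --hub_name your-name/your-model
    #   python download.py --dataset_name_or_path imdb --hub_name your-name/your-dataset
    #   python download.py --file ./data.json --hub_name your-name/your-dataset --file_in_repo data.json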