# NOTE(review): the three lines above this comment in the original paste were
# extraction artifacts (file size, commit hash, and a line-number gutter from a
# source viewer), not Python — neutralized as a comment so the module parses.
import os
import shutil
from pathlib import Path
from typing import Dict, List, TypedDict
from zipfile import ZipFile
import requests
from PIL import Image
from rich.progress import BarColumn, DownloadColumn, MofNCompleteColumn, Progress, TextColumn, TransferSpeedColumn
from tqdm import tqdm
"""
{
"name": "coco/train2017", # Visual Instruct Tuning images are all sourced from COCO Train 2017
"extract": True,
"extract_type": "directory",
"url": "http://images.cocodataset.org/zips/train2017.zip",
"do_rename": True,
},
{
"name": "gqa/images",
"extract": True,
"extract_type": "directory",
"url": "https://downloads.cs.stanford.edu/nlp/data/gqa/images.zip",
"do_rename": True,
},
{
"name": "ocr_vqa/images",
"extract": True,
"extract_type": "directory",
"url": "https://hf-mirror.com/datasets/qnguyen3/ocr_vqa/resolve/main/ocr_vqa.zip",
"do_rename": True,
},
{
"name": "textvqa/train_images",
"extract": True,
"extract_type": "directory",
"url": "https://dl.fbaipublicfiles.com/textvqa/images/train_val_images.zip",
"do_rename": True,
},
{
"name": "vg/VG_100K_2",
"extract": True,
"extract_type": "directory",
"url": "https://cs.stanford.edu/people/rak248/VG_100K_2/images2.zip",
"do_rename": True,
},
"""
# === Dataset Registry w/ Links ===
# fmt: off
class DatasetComponent(TypedDict, total=False):
    """Schema for one downloadable dataset component (all keys optional)."""

    name: str          # final directory/file name under the per-dataset download root
    extract: bool      # whether the downloaded artifact is a compressed archive
    extract_type: str  # "file" (single-member archive) or "directory"
    url: str           # source URL to download from
    do_rename: bool    # move the downloaded/extracted artifact to `name` afterwards


DATASET_REGISTRY: Dict[str, List[DatasetComponent]] = {
    # === LLaVa v1.5 Dataset(s) ===
    "llava-v1.5-instruct": [
        {
            "name": "vg/VG_100K",
            "extract": True,
            "extract_type": "directory",
            "url": "https://cs.stanford.edu/people/rak248/VG_100K_2/images.zip",
            "do_rename": True,
        },
    ],
}
# fmt: on
def convert_to_jpg(image_dir: Path) -> None:
    """Handling for OCR-VQA Images specifically; iterates through directory, converts all GIFs/PNGs.

    Files that are already `.jpg`/`.jpeg`, or whose JPG counterpart already
    exists, are skipped, so the function is safe to re-run after interruption.

    Raises:
        ValueError: on any file extension other than .jpg/.jpeg/.gif/.png.
    """
    print(f"Converting all Images in `{image_dir}` to JPG")
    for image_fn in tqdm(list(image_dir.iterdir())):
        jpg_fn = image_dir / f"{image_fn.stem}.jpg"  # target JPG filename
        if image_fn.suffix in {".jpg", ".jpeg"} or jpg_fn.exists():
            continue

        if image_fn.suffix == ".gif":
            # Context manager releases the file handle promptly; the original
            # leaked one handle per image, which can exhaust descriptors on
            # directories with many thousands of files.
            with Image.open(image_fn) as gif:
                gif.seek(0)  # use the first frame of the (possibly animated) GIF
                gif.convert("RGB").save(jpg_fn)
        elif image_fn.suffix == ".png":
            with Image.open(image_fn) as png:
                png.convert("RGB").save(jpg_fn)
        else:
            raise ValueError(f"Unexpected image format `{image_fn.suffix}`")
# NOTE(review): the imports below are byte-for-byte duplicates of the imports
# at the top of the file — harmless at runtime (Python caches modules in
# `sys.modules`) but redundant; this file looks like two revisions of the same
# script concatenated together. Consider deleting this duplicate block.
import os
import shutil
from pathlib import Path
from typing import Dict, List, TypedDict
from zipfile import ZipFile
import requests
from PIL import Image
from rich.progress import BarColumn, DownloadColumn, MofNCompleteColumn, Progress, TextColumn, TransferSpeedColumn
from tqdm import tqdm
# DatasetComponent and DATASET_REGISTRY remain unchanged
def download_with_progress(url: str, download_dir: Path, chunk_size_bytes: int = 1024) -> Path:
    """Utility function for downloading files from the internet, with a handy Rich-based progress bar.

    Downloads `url` into `download_dir` (keeping the URL's basename) and
    returns the destination path. An already-existing destination is treated
    as a completed download and returned immediately. Data is streamed to a
    `.part` file and renamed on success, so a partial download is never
    mistaken for a complete one; on retry the request resumes from the bytes
    already on disk via an HTTP `Range` header. (The original opened the final
    file in append mode and re-requested from byte 0 on retry, appending a
    second full copy onto the partial file and corrupting it.)

    Raises:
        Exception: if all retry attempts fail (last error is re-raised).
    """
    print(f"Downloading {url}")
    dest_path = download_dir / Path(url).name
    if dest_path.exists():
        return dest_path

    part_path = dest_path.with_name(dest_path.name + ".part")
    max_retries = 5
    for attempt in range(max_retries):
        try:
            # Resume from however many bytes we already have (0 on first try).
            existing = part_path.stat().st_size if part_path.exists() else 0
            resume_header = {"Range": f"bytes={existing}-"} if existing > 0 else {}
            with requests.get(url, headers=resume_header, stream=True, timeout=60) as response:
                if response.status_code == 200 and existing > 0:
                    # Server ignored the Range request; restart from scratch.
                    part_path.unlink()
                    existing = 0
                if response.status_code not in (200, 206):
                    raise Exception(f"Failed to download. Status code: {response.status_code}")

                # For a 206 response `content-length` counts only the remaining
                # bytes, so add `existing` for the full-file total. A missing
                # header yields an indeterminate bar instead of the original
                # `int("None")` ValueError.
                content_length = response.headers.get("content-length")
                total = (existing + int(content_length)) if content_length is not None else None

                with Progress(
                    TextColumn("[bold]{task.description} - {task.fields[fname]}"),
                    BarColumn(bar_width=None),
                    "[progress.percentage]{task.percentage:>3.1f}%",
                    "•",
                    DownloadColumn(),
                    "•",
                    TransferSpeedColumn(),
                    transient=True,
                ) as dl_progress:
                    dl_tid = dl_progress.add_task(
                        "Downloading", fname=dest_path.name, total=total, completed=existing
                    )
                    with open(part_path, "ab") as f:
                        for data in response.iter_content(chunk_size=chunk_size_bytes):
                            f.write(data)
                            # Advance by actual bytes written; the final chunk
                            # is usually shorter than `chunk_size_bytes`.
                            dl_progress.advance(dl_tid, len(data))

            # Only a fully-streamed download reaches here; promote it in place.
            part_path.rename(dest_path)
            return dest_path
        except Exception as e:
            print(f"Attempt {attempt + 1}/{max_retries} failed: {e}")
            if attempt < max_retries - 1:
                print("Retrying...")
            else:
                raise
# Other functions below are unchanged, and the main entry point is unchanged as well
def extract_with_progress(archive_path: Path, download_dir: Path, extract_type: str, cleanup: bool = False) -> Path:
    """Utility function for extracting compressed archives, with a handy Rich-based progress bar.

    For `extract_type == "file"` the archive must contain exactly one member;
    for `"directory"` every member is extracted. Returns the path of the first
    extracted member (the directory root for conventionally-built directory
    archives). If `cleanup` is True, the archive is deleted after extraction.

    Raises:
        AssertionError: non-`.zip` archive, empty archive, or a "file" archive with > 1 member.
        ValueError: unknown `extract_type`.
    """
    assert archive_path.suffix == ".zip", "Only `.zip` compressed archives are supported for now!"
    print(f"Extracting {archive_path.name} to `{download_dir}`")
    with Progress(
        TextColumn("[bold]{task.description} - {task.fields[aname]}"),
        BarColumn(bar_width=None),
        "[progress.percentage]{task.percentage:>3.1f}%",
        "•",
        MofNCompleteColumn(),
        transient=True,
    ) as ext_progress:
        with ZipFile(archive_path) as zf:
            members = zf.infolist()
            # Guard the `members[0]` access below (original raised IndexError).
            assert members, f"Archive `{archive_path}` is empty!"
            ext_tid = ext_progress.add_task("Extracting", aname=archive_path.name, total=len(members))

            # Extract the first member eagerly; its path doubles as the return value.
            extract_path = Path(zf.extract(members[0], download_dir))
            ext_progress.advance(ext_tid)  # fix: the first member previously never advanced the bar

            if extract_type == "file":
                assert len(members) == 1, f"Archive `{archive_path}` with extract type `{extract_type}` has > 1 member!"
            elif extract_type == "directory":
                for member in members[1:]:
                    zf.extract(member, download_dir)
                    ext_progress.advance(ext_tid)
            else:
                raise ValueError(f"Extract type `{extract_type}` for archive `{archive_path}` is not defined!")

    if cleanup:
        archive_path.unlink()

    return extract_path
def download_extract(dataset_id: str, root_dir: Path) -> None:
    """Download all files for a given dataset (querying registry above), extracting archives if necessary.

    Components whose final name already exists under
    `root_dir / "download" / dataset_id` are skipped, so the function is safe
    to re-run after an interrupted session.
    """
    os.makedirs(download_dir := root_dir / "download" / dataset_id, exist_ok=True)

    # Download Files — skip components whose target already exists on disk.
    dl_tasks = [d for d in DATASET_REGISTRY[dataset_id] if not (download_dir / d["name"]).exists()]
    for dl_task in dl_tasks:
        dl_path = download_with_progress(dl_task["url"], download_dir)

        # `DatasetComponent` is declared `total=False`, so the optional boolean
        # flags are read with `.get()` to avoid a KeyError on sparse entries.
        if dl_task.get("extract", False):
            dl_path = extract_with_progress(dl_path, download_dir, dl_task["extract_type"])
            # Normalize to a directory so the rename below moves the whole tree.
            dl_path = dl_path.parent if dl_path.is_file() else dl_path

        if dl_task.get("do_rename", False):
            shutil.move(dl_path, download_dir / dl_task["name"])
if __name__ == "__main__":
import sys
from pathlib import Path
# 设置根目录
root_dir = Path("./data") # 这里设置一个默认的下载目录
os.makedirs(root_dir, exist_ok=True)
# 下载所有数据集
for dataset_id in DATASET_REGISTRY.keys():
print(f"开始下载数据集: {dataset_id}")
download_extract(dataset_id, root_dir)
print("所有数据集下载完成!")
|