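"""Hugging Face dataset script that scrapes the torchvision documentation
(_modules pages) for ImageNet-1k backbone checkpoints, recording each
model's version, family, input size, download URL, and MD5 checksum."""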
import os
import re
import hashlib
import requests
import datasets
from bs4 import BeautifulSoup


# The dataset name is derived from this script's file name, following the
# datasets loading-script convention.
_DBNAME = os.path.splitext(os.path.basename(__file__))[0]

_HOMEPAGE = "https://huggingface.co/datasets/george-chou/" + _DBNAME

# Index page of torchvision model source modules to scrape.
_URL = 'https://pytorch.org/vision/main/_modules/'


class vi_backbones(datasets.GeneratorBasedBuilder):
    """Builder that catalogues torchvision ImageNet-1k backbone checkpoints."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "ver": datasets.Value("string"),
                    "type": datasets.Value("string"),
                    "input_size": datasets.Value("int16"),
                    "url": datasets.Value("string"),
                    "md5": datasets.Value("string"),
                }
            ),
            supervised_keys=("ver", "type"),
            homepage=_HOMEPAGE,
            license="mit"
        )
        
    def _get_file_md5(self, url):
        """
        Compute the MD5 checksum of the file at a URL.

        :param url: URL of the file
        :return: hex-encoded MD5 digest of the file contents
        """
        try:
            response = requests.get(url, stream=True)
            if response.status_code == 200:
                md5obj = hashlib.md5()
                # Stream the response in 1 MB chunks so large checkpoints
                # never have to fit in memory at once.
                for chunk in response.iter_content(chunk_size=1024 * 1024):
                    md5obj.update(chunk)  # fold each chunk into the digest
                return md5obj.hexdigest()
            else:
                raise ValueError(
                    f"Error downloading file from {url}. Status code: {response.status_code}")
        except Exception as e:
            raise ValueError(f"Error calculating MD5 of file at {url}: {str(e)}")

    def _parse_url(self, url):
        """Fetch a page and return it as a parsed BeautifulSoup tree."""
        response = requests.get(url)
        return BeautifulSoup(response.text, 'html.parser')

    def _info_on_dataset(self, m_ver, m_type, in1k_span):
        """Extract the checkpoint URL and input size that follow a weights span."""
        # In the highlighted source, the URL (an 's2' string span) and the
        # input size (an 'mi' integer span) follow the weights-enum span.
        url_span = in1k_span.find_next_sibling('span', {'class': 's2'})
        size_span = url_span.find_next_sibling('span', {'class': 'mi'})
        if m_type in ('wide', 'resnext'):
            # Wide ResNets and ResNeXts are ResNet variants; fold them together.
            m_type = 'resnet'
        m_url = str(url_span.text[1:-1])  # strip the surrounding quote marks
        input_size = int(size_span.text)
        m_dict = {
            'ver': m_ver,
            'type': m_type,
            'input_size': input_size,
            'url': m_url
        }
        return m_dict, size_span
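
    # A sketch of the highlighted HTML that _info_on_dataset walks, inferred
    # from the span classes queried above (illustrative only; the URL and
    # size are placeholders, not copied from the live page):
    #
    #   <span class="n">IMAGENET1K_V1</span> ...
    #   <span class="s2">"https://download.pytorch.org/models/..."</span> ...
    #   <span class="mi">224</span>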

    def _generate_dataset(self, url):
        """Scrape the torchvision module index and collect IN1K checkpoints."""
        torch_page = self._parse_url(url)
        article = torch_page.find('article', {'id': 'pytorch-article'})
        ul = article.find('ul').find('ul')
        in1k_v1, in1k_v2 = [], []

        for li in ul.find_all('li'):
            name = str(li.text)
            # Keep only direct model modules, e.g. 'torchvision.models.resnet'.
            if 'torchvision.models.' in name and len(name.split('.')) == 3:
                if '_api' in name or 'feature_extraction' in name:
                    continue

                href = li.find('a').get('href')
                model_page = self._parse_url(url + href)
                divs = model_page.select('div.viewcode-block')

                for div in divs:
                    div_id = str(div['id'])
                    # Weight enums are named like 'ResNet50_Weights'.
                    if '_Weights' in div_id:
                        m_ver = div_id.split('_Weights')[0].lower()
                        # The model family is the leading alphabetic prefix.
                        m_type = re.search('[a-zA-Z]+', m_ver).group(0)
                        in1k_v1_span = div.find(
                            'span', {'class': 'n'}, string='IMAGENET1K_V1')

                        if in1k_v1_span is None:
                            continue

                        m_dict, size_span = self._info_on_dataset(
                            m_ver, m_type, in1k_v1_span)
                        in1k_v1.append(m_dict)

                        in1k_v2_span = size_span.find_next_sibling(
                            'span', {'class': 'n'}, string='IMAGENET1K_V2')

                        if in1k_v2_span is not None:
                            m_dict, _ = self._info_on_dataset(
                                m_ver, m_type, in1k_v2_span)
                            in1k_v2.append(m_dict)

        return in1k_v1, in1k_v2
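
    # Each collected record is a plain dict, e.g. (illustrative values, not
    # taken from a real run):
    #
    #   {'ver': 'resnet50', 'type': 'resnet', 'input_size': 224,
    #    'url': 'https://download.pytorch.org/models/...'}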

    def _split_generators(self, _):
        # The download manager is unused: examples are scraped live from the
        # torchvision docs rather than extracted from downloaded archives.
        in1k_v1, in1k_v2 = self._generate_dataset(_URL)

        return [
            datasets.SplitGenerator(
                name="IMAGENET1K_V1",
                gen_kwargs={
                    "subset": in1k_v1,
                },
            ),
            datasets.SplitGenerator(
                name="IMAGENET1K_V2",
                gen_kwargs={
                    "subset": in1k_v2,
                },
            ),
        ]

    def _generate_examples(self, subset):
        for i, model in enumerate(subset):
            # Note: _get_file_md5 downloads each checkpoint in full, so
            # generating examples is network- and time-intensive.
            yield i, {
                "ver": model['ver'],
                "type": model['type'],
                "input_size": model['input_size'],
                "url": model['url'],
                "md5": self._get_file_md5(model['url']),
            }
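

# A minimal usage sketch (assumption: this script is published on the Hub as
# "george-chou/vi_backbones", matching _HOMEPAGE above; the split names come
# from _split_generators):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("george-chou/vi_backbones", split="IMAGENET1K_V1")
#   print(ds[0])
#   # -> {'ver': 'resnet50', 'type': 'resnet', 'input_size': 224,
#   #     'url': 'https://download.pytorch.org/models/...', 'md5': '...'}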