"""Hugging Face dataset loading script that scrapes the rendered torchvision
source pages and collects ImageNet-1k pretrained backbone metadata: model
version, architecture type, input size, and checkpoint URL."""
import os
import re

import datasets
import requests
from bs4 import BeautifulSoup

# The dataset name is derived from this file's name ("cv_backbones.py" -> "cv_backbones").
_DBNAME = os.path.basename(__file__).split('.')[0]
_HOMEPAGE = "https://huggingface.co/datasets/monet-joe/" + _DBNAME
# Index page listing the torchvision model source modules.
_URL = 'https://pytorch.org/vision/main/_modules/'


class cv_backbones(datasets.GeneratorBasedBuilder):
    """Builder that scrapes torchvision's model source pages at load time."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "ver": datasets.Value("string"),        # model version, e.g. "resnet18"
                    "type": datasets.Value("string"),       # architecture family
                    "input_size": datasets.Value("int16"),  # expected input resolution
                    "url": datasets.Value("string"),        # pretrained checkpoint URL
                }
            ),
            supervised_keys=("ver", "type"),
            homepage=_HOMEPAGE,
            license="mit",
        )
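
    # A sample record (values illustrative; the checkpoint hash is elided):
    #   {"ver": "resnet18", "type": "resnet", "input_size": 224,
    #    "url": "https://download.pytorch.org/models/resnet18-....pth"}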

    def _parse_url(self, url):
        # Fetch a page and return its parsed DOM.
        response = requests.get(url, timeout=60)
        response.raise_for_status()
        return BeautifulSoup(response.text, 'html.parser')

    def _special_type(self, m_ver):
        # Fold torchvision naming aliases into their parent architecture family.
        m_type = re.search('[a-zA-Z]+', m_ver).group(0)

        if m_type in ('wide', 'resnext'):
            return 'resnet'

        if m_type == 'swin':
            return 'swin_transformer'

        if m_type == 'inception':
            return 'googlenet'

        return m_type
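
    # Illustrative mappings:
    #   _special_type("wide_resnet101_2") -> "resnet"
    #   _special_type("swin_t")           -> "swin_transformer"
    #   _special_type("inception_v3")     -> "googlenet"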

    def _info_on_dataset(self, m_ver, m_type, in1k_span):
        # Starting from the weight-name span, pull the checkpoint URL (the
        # next string-literal span) and the input size (the next integer span).
        url_span = in1k_span.find_next_sibling('span', {'class': 's2'})
        size_span = url_span.find_next_sibling('span', {'class': 'mi'})
        m_url = str(url_span.text[1:-1])  # strip the surrounding quotes
        input_size = int(size_span.text)
        m_dict = {
            'ver': m_ver,
            'type': m_type,
            'input_size': input_size,
            'url': m_url
        }
        return m_dict, size_span
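
    # The traversal above assumes Pygments-style spans on the rendered source
    # page, roughly (an illustrative sketch, not copied verbatim):
    #   <span class="n">IMAGENET1K_V1</span> ...
    #   <span class="s2">"https://download.pytorch.org/models/resnet18-....pth"</span>
    #   <span class="mi">224</span>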

    def _generate_dataset(self, url):
        # Walk the torchvision module index and collect weight metadata for
        # every model that ships IMAGENET1K_V1 (and optionally V2) weights.
        torch_page = self._parse_url(url)
        article = torch_page.find('article', {'id': 'pytorch-article'})
        ul = article.find('ul').find('ul')
        in1k_v1, in1k_v2 = [], []

        for li in ul.find_all('li'):
            name = str(li.text)
            # Keep only direct model modules, e.g. "torchvision.models.resnet".
            if 'torchvision.models.' in name and len(name.split('.')) == 3:
                # Skip utility modules and entries this scraper does not handle.
                if ('_api' in name
                        or 'feature_extraction' in name
                        or 'maxvit' in name):
                    continue

                href = li.find('a').get('href')
                model_page = self._parse_url(url + href)
                divs = model_page.select('div.viewcode-block')

                for div in divs:
                    div_id = str(div['id'])
                    # Each "*_Weights" code block documents one model variant.
                    if '_Weights' in div_id:
                        m_ver = div_id.split('_Weight')[0].lower()

                        if 'swin_v2_' in m_ver:
                            continue

                        m_type = self._special_type(m_ver)

                        in1k_v1_span = div.find(
                            name='span',
                            attrs={'class': 'n'},
                            string='IMAGENET1K_V1'
                        )

                        if in1k_v1_span is None:
                            continue

                        m_dict, size_span = self._info_on_dataset(
                            m_ver,
                            m_type,
                            in1k_v1_span
                        )
                        in1k_v1.append(m_dict)

                        in1k_v2_span = size_span.find_next_sibling(
                            name='span',
                            attrs={'class': 'n'},
                            string='IMAGENET1K_V2'
                        )

                        if in1k_v2_span is not None:
                            m_dict, _ = self._info_on_dataset(
                                m_ver,
                                m_type,
                                in1k_v2_span
                            )
                            in1k_v2.append(m_dict)

        return in1k_v1, in1k_v2

    def _split_generators(self, _):
        # The download manager is unused: data is scraped live from _URL.
        in1k_v1, in1k_v2 = self._generate_dataset(_URL)

        return [
            datasets.SplitGenerator(
                name="IMAGENET1K_V1",
                gen_kwargs={
                    "subset": in1k_v1,
                },
            ),
            datasets.SplitGenerator(
                name="IMAGENET1K_V2",
                gen_kwargs={
                    "subset": in1k_v2,
                },
            ),
        ]

    def _generate_examples(self, subset):
        # Enumerate so every example gets a unique integer key.
        for i, model in enumerate(subset):
            yield i, {
                "ver": model['ver'],
                "type": model['type'],
                "input_size": model['input_size'],
                "url": model['url'],
            }
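

# Usage sketch (a minimal example; the repo id "monet-joe/cv_backbones" is an
# assumption inferred from _HOMEPAGE and the builder class name, and newer
# `datasets` versions may additionally require trust_remote_code=True):
#
#     from datasets import load_dataset
#
#     backbones = load_dataset("monet-joe/cv_backbones", split="IMAGENET1K_V1")
#     for model in backbones:
#         print(model["ver"], model["type"], model["input_size"], model["url"])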