TheFrenchDemos committed
Commit 9705a2a · 0 Parent(s)

Initial commit

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .dockerignore +20 -0
  2. .gitattributes +38 -0
  3. .gitignore +2 -0
  4. Dockerfile +37 -0
  5. README.md +14 -0
  6. app.py +285 -0
  7. count_authors.py +95 -0
  8. data/openimages_index.bin +3 -0
  9. data/openimages_urls.txt +3 -0
  10. data_prep.py +39 -0
  11. request.py +56 -0
  12. requirements.txt +3 -0
  13. static/1.webp +0 -0
  14. static/2.webp +0 -0
  15. static/3.webp +0 -0
  16. static/RO_Summary.pdf +3 -0
  17. static/Reg_Summary.pdf +3 -0
  18. static/checkpoints/dinov2_vits14_pretrain.pth +3 -0
  19. static/facebookresearch_dinov2_main/.github/workflows/lint.yaml +38 -0
  20. static/facebookresearch_dinov2_main/.gitignore +11 -0
  21. static/facebookresearch_dinov2_main/CODE_OF_CONDUCT.md +80 -0
  22. static/facebookresearch_dinov2_main/CONTRIBUTING.md +31 -0
  23. static/facebookresearch_dinov2_main/LICENSE +203 -0
  24. static/facebookresearch_dinov2_main/MODEL_CARD.md +272 -0
  25. static/facebookresearch_dinov2_main/README.md +620 -0
  26. static/facebookresearch_dinov2_main/conda-extras.yaml +24 -0
  27. static/facebookresearch_dinov2_main/conda.yaml +22 -0
  28. static/facebookresearch_dinov2_main/dinov2/__init__.py +6 -0
  29. static/facebookresearch_dinov2_main/dinov2/configs/__init__.py +22 -0
  30. static/facebookresearch_dinov2_main/dinov2/configs/eval/vitb14_pretrain.yaml +6 -0
  31. static/facebookresearch_dinov2_main/dinov2/configs/eval/vitb14_reg4_pretrain.yaml +9 -0
  32. static/facebookresearch_dinov2_main/dinov2/configs/eval/vitg14_pretrain.yaml +7 -0
  33. static/facebookresearch_dinov2_main/dinov2/configs/eval/vitg14_reg4_pretrain.yaml +10 -0
  34. static/facebookresearch_dinov2_main/dinov2/configs/eval/vitl14_pretrain.yaml +6 -0
  35. static/facebookresearch_dinov2_main/dinov2/configs/eval/vitl14_reg4_pretrain.yaml +9 -0
  36. static/facebookresearch_dinov2_main/dinov2/configs/eval/vits14_pretrain.yaml +6 -0
  37. static/facebookresearch_dinov2_main/dinov2/configs/eval/vits14_reg4_pretrain.yaml +9 -0
  38. static/facebookresearch_dinov2_main/dinov2/configs/ssl_default_config.yaml +118 -0
  39. static/facebookresearch_dinov2_main/dinov2/configs/train/vitg14.yaml +26 -0
  40. static/facebookresearch_dinov2_main/dinov2/configs/train/vitl14.yaml +26 -0
  41. static/facebookresearch_dinov2_main/dinov2/configs/train/vitl16_short.yaml +6 -0
  42. static/facebookresearch_dinov2_main/dinov2/data/__init__.py +10 -0
  43. static/facebookresearch_dinov2_main/dinov2/data/adapters.py +28 -0
  44. static/facebookresearch_dinov2_main/dinov2/data/augmentations.py +118 -0
  45. static/facebookresearch_dinov2_main/dinov2/data/collate.py +49 -0
  46. static/facebookresearch_dinov2_main/dinov2/data/datasets/__init__.py +7 -0
  47. static/facebookresearch_dinov2_main/dinov2/data/datasets/decoders.py +31 -0
  48. static/facebookresearch_dinov2_main/dinov2/data/datasets/extended.py +38 -0
  49. static/facebookresearch_dinov2_main/dinov2/data/datasets/image_net.py +290 -0
  50. static/facebookresearch_dinov2_main/dinov2/data/datasets/image_net_22k.py +302 -0
.dockerignore ADDED
@@ -0,0 +1,20 @@
+ __pycache__
+ *.pyc
+ *.pyo
+ *.pyd
+ .Python
+ env
+ pip-log.txt
+ pip-delete-this-directory.txt
+ .tox
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.log
+ .pytest_cache
+ .env
+ .venv
+ .DS_Store
.gitattributes ADDED
@@ -0,0 +1,38 @@
+ *.pdf filter=lfs diff=lfs merge=lfs -text
+ *.txt filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ static/ia_gen_droits_auteur.pdf filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,2 @@
+ __pycache__
+ .DS_Store
Dockerfile ADDED
@@ -0,0 +1,37 @@
+ FROM python:3.9-slim
+
+ # WORKDIR /app
+
+ # COPY requirements.txt .
+ # RUN pip install --no-cache-dir -r requirements.txt
+
+ # COPY . .
+
+ # EXPOSE 8080
+
+ # CMD ["python", "app.py"]
+
+
+ FROM python:3.9-slim
+
+ # Set the working directory inside the container
+ WORKDIR /app
+
+ # Install dependencies
+ COPY requirements.txt .
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ # Copy the rest of the application code
+ COPY . .
+
+ # Expose the port the Flask app listens on
+ EXPOSE 7860
+
+ # Give ownership of /app to a non-root user
+ RUN chown -R 1001:1001 /app
+
+ # Switch to the non-root user
+ USER 1001
+
+ # Command to run the application
+ CMD ["python", "app.py"]
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: GenAI Attribution Simulator
+ emoji: 🎁
+ colorFrom: gray
+ colorTo: yellow
+ sdk: docker
+ pinned: false
+ license: apache-2.0
+ short_description: A reward simulator for training data attribution
+ ---
+
+ # Reward Simulator
+
+ This repository contains the code for the reward simulator: a tool for simulating the reward that training data would receive from a model's generations.
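Since the Space exposes the simulator through the Flask routes defined in `app.py`, a minimal sketch of querying it programmatically could look like the following. The host/port (7860, as exposed by the Dockerfile) and the image path are assumptions; the form fields mirror `DEFAULT_PARAMS` in `app.py`, and any field left out falls back to its default.

```python
# Hedged sketch: post an image to the /process endpoint defined in app.py.
# The URL and the example image path are assumptions, not part of the repo.
import requests

url = "http://localhost:7860/process"
data = {
    "subscription": 12,    # monthly subscription, in EUR
    "author_share": 5,     # % of revenue shared with authors
    "ro_share": 10,        # % of revenue shared with rights owners
    "num_users_k": 500,    # number of subscribers, in thousands
    "num_neighbors": 8,    # how many nearest training images to attribute
}

with open("my_generation.jpg", "rb") as f:        # illustrative image path
    resp = requests.post(url, files={"image": f}, data=data)

# Each result pairs a retrieved training image with its simulated rewards.
for item in resp.json()["results"]:
    print(item["image_url"], item["rewards"]["author_month_reward"])
```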
app.py ADDED
@@ -0,0 +1,285 @@
+ # docker build -t reward-simulator . && docker run -p 7860:7860 -v $(pwd)/data:/app/data reward-simulator
+
+ from PIL import Image
+ import numpy as np
+ import io
+ import faiss
+
+ import requests
+ import torch
+
+ from request import get_ft, get_topk
+ from flickrapi import FlickrAPI
+
+ from flask import Flask, request, render_template, jsonify, send_from_directory
+ app = Flask(__name__)
+
+ PRESET_IMAGES = {
+     1: "static/1.webp",
+     2: "static/2.webp",
+     3: "static/3.webp"
+ }
+
+ # Add Flickr configuration
+ FLICKR_API_KEY = '80ef21a6f7eb0984ea613c316a89ca69'
+ FLICKR_API_SECRET = '4d0e8ce6734f4b3f'
+ flickr = FlickrAPI(FLICKR_API_KEY, FLICKR_API_SECRET, format='parsed-json', store_token=False)
+
+ def get_photo_id(url):
+     """Extract photo ID from Flickr URL"""
+     try:
+         return url.split('/')[-1].split('_')[0]
+     except:
+         return None
+
+ def get_other_info(url):
+     """Get author information from Flickr"""
+     try:
+         photo_id = get_photo_id(url)
+         if photo_id:
+             photo_info = flickr.photos.getInfo(photo_id=photo_id)
+             license = photo_info['photo']['license']
+             owner = photo_info['photo']['owner']
+             flickr_url = f"https://www.flickr.com/photos/{owner.get('nsid', '')}/{photo_id}"
+             return {
+                 'username': owner.get('username', ''),
+                 'realname': owner.get('realname', ''),
+                 'nsid': owner.get('nsid', ''),
+                 'flickr_url': flickr_url,
+                 'license': license
+             }
+     except:
+         pass
+     return {
+         'username': 'Unknown',
+         'realname': 'Unknown',
+         'nsid': '',
+         'flickr_url': '',
+         'license': 'Unknown'
+     }
+
+
+ def load_model():
+     """Load DINOv2 model once and cache it"""
+     torch.hub.set_dir('static')
+     model = torch.hub.load('facebookresearch/dinov2', 'dinov2_vits14')
+     model.eval()
+     model.to(torch.device('cuda' if torch.cuda.is_available() else 'cpu'))
+     return model
+
+ def load_index(index_path):
+     """Load FAISS index once and cache it"""
+     return faiss.read_index(index_path)
+
+ def distance_to_similarity(distances, temp=1e-4):
+     """Convert distance to similarity"""
+     for ii in range(len(distances)):
+         contribs = distances[ii].max() - distances[ii]
+         contribs = contribs / temp
+         sum_contribs = np.exp(contribs).sum()
+         distances[ii] = np.exp(contribs) / sum_contribs
+     return distances
+
+ def calculate_rewards(subscription, num_generations, author_share, ro_share, num_users_k, similarities, num_authors=1800):
+     """Calculate rewards based on user inputs and similarities"""
+     num_users = num_users_k * 1000
+
+     # Monthly revenue allocated to authors
+     authors_monthly_revenue = subscription * num_users * (author_share / 100)
+
+     rewards = []
+     for sim in similarities[0]:
+         # Attribution bonus based on similarity score and number of neighbors
+         attribution_bonus = sim * len(similarities[0])
+
+         # Calculate monthly rewards
+         author_month_reward = (authors_monthly_revenue / num_authors) * attribution_bonus
+         ro_month_reward = author_month_reward / (author_share / 100) * (ro_share / 100)
+
+         rewards.append({
+             'paid_per_month': f"{subscription:.0f}€",
+             'attribution': f"{sim*100:.0f}%",
+             'author_month_reward': f"{author_month_reward:.0f}€",
+             'ro_month_reward': f"{ro_month_reward:.0f}€"
+             # 'paid_per_month': f"{subscription:.0f}€",
+             # 'paid_per_gen': f"{paid_per_gen:.2f}€",
+             # 'aro_share': f"{aro_share:.2f}c€",
+             # 'attribution': f"{sim*100:.0f}%",
+             # 'training_data_reward': f"{training_data_reward:.2f}c€",
+             # 'author_month_reward': f"{author_month_reward:.0f}€",
+             # 'ro_month_reward': f"{ro_month_reward:.0f}€"
+         })
+     return rewards
+
+ # Global variables for model and index
+ model = None
+ index = None
+ urls = None
+
+ def init_model():
+     global model, index, urls
+     model = load_model()
+     index = load_index("data/openimages_index.bin")
+     with open("data/openimages_urls.txt", "r") as f:
+         urls = f.readlines()
+
+ @app.route('/')
+ def home():
+     return render_template('index.html')
+
+ @app.route('/static/<path:filename>')
+ def serve_static(filename):
+     return send_from_directory('static', filename)
+
+ @app.route('/select_preset/<int:preset_id>')
+ def select_preset(preset_id):
+     if preset_id not in PRESET_IMAGES:
+         return jsonify({'error': 'Invalid preset ID'}), 400
+
+     try:
+         image_path = PRESET_IMAGES[preset_id]
+         image = Image.open(image_path).convert('RGB')
+
+         # Use default parameters for presets
+         params = {
+             'subscription': 12,
+             'num_generations': 60,
+             'author_share': 5,
+             'ro_share': 10,
+             'num_users_k': 500,
+             'num_neighbors': 10
+         }
+
+         # Get features and search
+         features = get_ft(model, image)
+         distances, indices = get_topk(index, features, topk=params['num_neighbors'])
+
+         # Collect valid results first
+         valid_results = []
+         valid_similarities = []
+         for i in range(params['num_neighbors']):
+             image_url = urls[indices[0][i]].strip()
+             try:
+                 response = requests.head(image_url)
+                 if response.status_code == 200:
+                     valid_results.append({
+                         'index': i,
+                         'url': image_url
+                     })
+                     valid_similarities.append(distances[0][i])
+             except requests.RequestException:
+                 continue
+
+         # Renormalize similarities for valid results
+         if valid_similarities:
+             similarities = distance_to_similarity(np.array([valid_similarities]), temp=1e-5)
+
+             # Calculate rewards with renormalized similarities
+             rewards = calculate_rewards(
+                 params['subscription'],
+                 params['num_generations'],
+                 params['author_share'],
+                 params['ro_share'],
+                 params['num_users_k'],
+                 similarities
+             )
+
+         # Build final results
+         results = []
+         for i, result in enumerate(valid_results):
+             other_info = get_other_info(result['url'])
+             results.append({
+                 'image_url': result['url'],
+                 'rewards': rewards[i],
+                 'other': other_info
+             })
+
+         return jsonify({'results': results})
+
+     except Exception as e:
+         return jsonify({'error': str(e)}), 500
+
+ DEFAULT_PARAMS = {
+     'subscription': 12,
+     'num_generations': 60,
+     'author_share': 5,
+     'ro_share': 10,
+     'num_users_k': 500,
+     'num_neighbors': 8,
+     'num_authors': 1800
+ }
+
+ @app.route('/process', methods=['POST'])
+ def process_image():
+     if 'image' not in request.files:
+         return jsonify({'error': 'No image provided'}), 400
+
+     try:
+         image_file = request.files['image']
+         image = Image.open(io.BytesIO(image_file.read())).convert('RGB')
+
+         # Use default parameters if none provided
+         params = DEFAULT_PARAMS.copy()
+         if request.form:
+             params.update({
+                 'subscription': float(request.form.get('subscription', params['subscription'])),
+                 'num_generations': int(request.form.get('num_generations', params['num_generations'])),
+                 'author_share': float(request.form.get('author_share', params['author_share'])),
+                 'ro_share': float(request.form.get('ro_share', params['ro_share'])),
+                 'num_users_k': int(request.form.get('num_users_k', params['num_users_k'])),
+                 'num_neighbors': int(request.form.get('num_neighbors', params['num_neighbors'])),
+                 'num_authors': int(request.form.get('num_authors', DEFAULT_PARAMS['num_authors'])),
+             })
+
+         # Process image
+         features = get_ft(model, image)
+         distances, indices = get_topk(index, features, topk=params['num_neighbors'])
+
+         # Collect valid results first
+         valid_results = []
+         valid_similarities = []
+         for i in range(params['num_neighbors']):
+             image_url = urls[indices[0][i]].strip()
+             try:
+                 response = requests.head(image_url)
+                 if response.status_code == 200:
+                     valid_results.append({
+                         'index': i,
+                         'url': image_url
+                     })
+                     valid_similarities.append(distances[0][i])
+             except requests.RequestException:
+                 continue
+
+         # Renormalize similarities for valid results
+         if valid_similarities:
+             similarities = distance_to_similarity(np.array([valid_similarities]), temp=1e-5)
+
+             # Calculate rewards with renormalized similarities
+             rewards = calculate_rewards(
+                 params['subscription'],
+                 params['num_generations'],
+                 params['author_share'],
+                 params['ro_share'],
+                 params['num_users_k'],
+                 similarities
+             )
+
+         # Build final results
+         results = []
+         for i, result in enumerate(valid_results):
+             other_info = get_other_info(result['url'])
+             results.append({
+                 'image_url': result['url'],
+                 'rewards': rewards[i],
+                 'other': other_info
+             })
+
+         return jsonify({'results': results})
+
+     except Exception as e:
+         return jsonify({'error': str(e)}), 500
+
+ if __name__ == '__main__':
+     init_model()
+     app.run(host='0.0.0.0', port=7860)
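To make the attribution arithmetic in `calculate_rewards` easier to audit, here is a self-contained sketch that reproduces it with the preset defaults (12 € subscription, 500 k users, 5 % author share, 10 % rights-owner share, 1 800 authors). The similarity vector is illustrative; in the app it comes from `distance_to_similarity`, which turns FAISS distances into a softmax over the retrieved neighbours so the values sum to 1.

```python
# Standalone sketch of the arithmetic in calculate_rewards, using the app defaults.
import numpy as np

subscription = 12          # EUR per month per user
num_users = 500_000        # num_users_k * 1000
author_share = 0.05        # 5 % of revenue goes to authors
ro_share = 0.10            # 10 % goes to rights owners
num_authors = 1800

# Monthly revenue pool reserved for authors: 12 * 500_000 * 0.05 = 300_000 EUR
authors_monthly_revenue = subscription * num_users * author_share

# Illustrative softmax-normalised similarities over k = 3 retrieved images
similarities = np.array([0.60, 0.25, 0.15])

for sim in similarities:
    attribution_bonus = sim * len(similarities)                    # rescaled by k
    author_month = authors_monthly_revenue / num_authors * attribution_bonus
    ro_month = author_month / author_share * ro_share
    print(f"attribution {sim:.0%}: author {author_month:.0f} EUR/month, rights owner {ro_month:.0f} EUR/month")
```

With these defaults the author pool is 300 000 €/month, i.e. about 167 € per author before the attribution bonus is applied.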
count_authors.py ADDED
@@ -0,0 +1,95 @@
+ import tqdm
+ from multiprocessing import Pool, cpu_count
+ import signal
+ import sys
+ import time
+
+ from flickrapi import FlickrAPI
+
+ # Add Flickr configuration
+ FLICKR_API_KEY = '80ef21a6f7eb0984ea613c316a89ca69'
+ FLICKR_API_SECRET = '4d0e8ce6734f4b3f'
+ flickr = FlickrAPI(FLICKR_API_KEY, FLICKR_API_SECRET, format='parsed-json', store_token=False)
+
+ def get_photo_id(url):
+     """Extract photo ID from Flickr URL"""
+     try:
+         return url.split('/')[-1].split('_')[0]
+     except:
+         return None
+
+ def get_other_info(url):
+     """Get author information from Flickr"""
+     try:
+         photo_id = get_photo_id(url)
+         if photo_id:
+             # wait for 0.1 second
+             time.sleep(0.1)
+             photo_info = flickr.photos.getInfo(photo_id=photo_id)
+             license = photo_info['photo']['license']
+             owner = photo_info['photo']['owner']
+             flickr_url = f"https://www.flickr.com/photos/{owner.get('nsid', '')}/{photo_id}"
+             return {
+                 'username': owner.get('username', ''),
+                 'realname': owner.get('realname', ''),
+                 'nsid': owner.get('nsid', ''),
+                 'flickr_url': flickr_url,
+                 'license': license
+             }
+     except:
+         pass
+     return {
+         'username': 'Unknown',
+         'realname': 'Unknown',
+         'nsid': '',
+         'flickr_url': '',
+         'license': 'Unknown'
+     }
+
+ def init_worker():
+     """Initialize worker process to handle signals"""
+     signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+ def process_url(url):
+     try:
+         return get_other_info(url)
+     except Exception as e:
+         return {
+             'username': 'Error',
+             'realname': str(e),
+             'nsid': '',
+             'flickr_url': url,
+             'license': 'Unknown'
+         }
+
+ def process_urls_in_chunks(urls, chunk_size=100000):
+     authors = []
+     with Pool(cpu_count(), initializer=init_worker) as pool:
+         try:
+             # Process URLs in chunks
+             for i in range(0, len(urls), chunk_size):
+                 chunk = urls[i:i + chunk_size]
+                 chunk_results = list(tqdm.tqdm(
+                     pool.imap(process_url, chunk),
+                     total=len(chunk),
+                     desc=f"Processing chunk {i//chunk_size + 1}"
+                 ))
+                 authors.extend(chunk_results)
+         except KeyboardInterrupt:
+             pool.terminate()
+             pool.join()
+             print("\nProcessing interrupted by user")
+             sys.exit(1)
+     return authors
+
+ if __name__ == "__main__":
+     urls_file = "data/openimages_urls.txt"
+     with open(urls_file) as f:
+         urls = [url.strip() for url in f.readlines()][:100000]
+
+     authors = process_urls_in_chunks(urls)
+
+     # Count unique authors
+     unique_authors = len(set([author['username'] for author in authors]))
+     print(f"unique_authors: {unique_authors}")
+     print(f"Number of unique authors: {unique_authors}")
data/openimages_index.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:190ffe0f10bee7782f1f99a3ec340da49b41a3402988df4e5d8b3f57ed2fbacb
+ size 102102985
data/openimages_urls.txt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:071fa57fc38809e0d50c91bff77ebba6c9b4ce3aa3e1fa803f24de0b5411c93f
+ size 519686095
data_prep.py ADDED
@@ -0,0 +1,39 @@
+ import os
+ import tarfile
+ import shutil
+
+ # Define the base directory (update this to your folder location)
+ base_dir = "..."
+
+ # Define the output directory
+ output_dir = os.path.join(base_dir, "all_extracted")
+
+ # Create the output directory if it doesn't exist
+ os.makedirs(output_dir, exist_ok=True)
+
+ # Iterate over the folders and files in the base directory
+ for root, dirs, files in os.walk(base_dir):
+     for file in files:
+         if file.endswith(".tar"):
+             tar_path = os.path.join(root, file)
+             tar_name = os.path.basename(file)[:-4]
+             print(f"Extracting: {tar_path}")
+
+             # Open the .tar file and extract its contents
+             with tarfile.open(tar_path) as tar:
+                 # Extract to a temporary location
+                 temp_dir = os.path.join(base_dir, "temp_extract")
+                 os.makedirs(temp_dir, exist_ok=True)
+                 tar.extractall(temp_dir)
+
+             # Move the extracted files to the output directory
+             for extracted_file in os.listdir(temp_dir):
+                 source_path = os.path.join(temp_dir, extracted_file)
+                 target_path = os.path.join(output_dir, f"{tar_name}_{extracted_file}")
+                 shutil.move(source_path, target_path)
+
+             # Clean up the temporary directory
+             shutil.rmtree(temp_dir)
+
+ print(f"All files extracted to: {output_dir}")
request.py ADDED
@@ -0,0 +1,56 @@
+ from PIL import Image
+ import faiss
+ import numpy as np
+ import torch
+ from torchvision import transforms
+
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+ transform = transforms.Compose([
+     transforms.ToTensor(),
+     transforms.Resize(256),
+     transforms.CenterCrop(224),
+     transforms.Normalize(
+         mean=(0.485, 0.456, 0.406),
+         std=(0.229, 0.224, 0.225)
+     )
+ ])
+
+ def get_ft(
+     extractor: torch.nn.Module,
+     img: Image.Image
+ ) -> np.ndarray:
+     img = transform(img)
+     ft = extractor(img.unsqueeze(0).to(device))
+     return ft.detach().cpu().numpy()
+
+ def get_topk(
+     index: faiss.Index,
+     ft: np.ndarray,
+     topk: int = 10
+ ) -> tuple[np.ndarray, np.ndarray]:
+     """
+     Get top-k nearest neighbors from the index
+     Args:
+         index: Faiss index
+         ft: Input feature
+         topk: Number of nearest neighbors to return
+     Returns:
+         Tuple of (distances, indices) for top-k matches
+     """
+     # Search index for nearest neighbors
+     distances, indices = index.search(ft, topk)
+     return distances, indices
+
+
+ # EXAMPLE:
+
+ # image = Image.open('path/to/your/image.jpg')
+ # image = transform(image)
+
+ # extractor = torch.hub.load('facebookresearch/dinov2', 'dinov2_vits14')
+ # extractor.eval()
+ # extractor.to(device)
+
+ # ft = get_ft(...)
+ # indices, distances = ...
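The trailing EXAMPLE comments leave the last two calls elided. A completed, runnable version under the same assumptions (a local image path and the prebuilt index from `data/`) could look like the sketch below. Note that `get_topk` returns `(distances, indices)` in that order, and that `get_ft` already applies `transform`, so the image should be passed in as a PIL image rather than pre-transformed.

```python
# Hedged sketch completing the EXAMPLE above; the image path is a placeholder.
import faiss
import torch
from PIL import Image

from request import get_ft, get_topk

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

extractor = torch.hub.load('facebookresearch/dinov2', 'dinov2_vits14')
extractor.eval().to(device)

image = Image.open('path/to/your/image.jpg').convert('RGB')   # placeholder path
index = faiss.read_index('data/openimages_index.bin')

ft = get_ft(extractor, image)                    # (1, 384) feature vector
distances, indices = get_topk(index, ft, topk=10)
print(indices[0], distances[0])
```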
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7e9cfce4c1d93ce8a9f05048c712cc8123d2961a2f8cab06f17c76aa15faa5ad
+ size 74
static/1.webp ADDED
static/2.webp ADDED
static/3.webp ADDED
static/RO_Summary.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab4cb5a27b5cf7ad13089c6655efd2d9dbb3786348485c400ef712c3f48e8de1
+ size 4906779
static/Reg_Summary.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7943ae6d56647d14de48ef9b8d288adcd522fb7a7ba8baf8c9de214a071e9d29
+ size 42656
static/checkpoints/dinov2_vits14_pretrain.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b938bf1bc15cd2ec0feacfe3a1bb553fe8ea9ca46a7e1d8d00217f29aef60cd9
+ size 88283115
static/facebookresearch_dinov2_main/.github/workflows/lint.yaml ADDED
@@ -0,0 +1,38 @@
+ name: Lint
+
+ on:
+   push:
+     branches:
+       - main
+   pull_request:
+     branches:
+       - main
+
+ jobs:
+   run-linters:
+     name: Run linters
+     runs-on: ubuntu-20.04
+
+     steps:
+       - name: Checkout repository
+         uses: actions/checkout@v3
+       - name: Set up Python
+         uses: actions/setup-python@v4
+         with:
+           python-version: 3.9
+           cache: 'pip'
+           cache-dependency-path: '**/requirements*.txt'
+       - name: Install Python (development) dependencies
+         run: |
+           pip install -r requirements-dev.txt
+       - name: Run flake8
+         run: |
+           flake8
+       - name: Run black
+         if: always()
+         run: |
+           black --check dinov2
+       - name: Run pylint
+         if: always()
+         run: |
+           pylint --exit-zero dinov2
static/facebookresearch_dinov2_main/.gitignore ADDED
@@ -0,0 +1,11 @@
+ build/
+ dist/
+ *.egg-info/
+ **/__pycache__/
+
+ **/.ipynb_checkpoints
+ **/.ipynb_checkpoints/**
+
+ *.swp
+
+ .vscode/
static/facebookresearch_dinov2_main/CODE_OF_CONDUCT.md ADDED
@@ -0,0 +1,80 @@
1
+ # Code of Conduct
2
+
3
+ ## Our Pledge
4
+
5
+ In the interest of fostering an open and welcoming environment, we as
6
+ contributors and maintainers pledge to make participation in our project and
7
+ our community a harassment-free experience for everyone, regardless of age, body
8
+ size, disability, ethnicity, sex characteristics, gender identity and expression,
9
+ level of experience, education, socio-economic status, nationality, personal
10
+ appearance, race, religion, or sexual identity and orientation.
11
+
12
+ ## Our Standards
13
+
14
+ Examples of behavior that contributes to creating a positive environment
15
+ include:
16
+
17
+ * Using welcoming and inclusive language
18
+ * Being respectful of differing viewpoints and experiences
19
+ * Gracefully accepting constructive criticism
20
+ * Focusing on what is best for the community
21
+ * Showing empathy towards other community members
22
+
23
+ Examples of unacceptable behavior by participants include:
24
+
25
+ * The use of sexualized language or imagery and unwelcome sexual attention or
26
+ advances
27
+ * Trolling, insulting/derogatory comments, and personal or political attacks
28
+ * Public or private harassment
29
+ * Publishing others' private information, such as a physical or electronic
30
+ address, without explicit permission
31
+ * Other conduct which could reasonably be considered inappropriate in a
32
+ professional setting
33
+
34
+ ## Our Responsibilities
35
+
36
+ Project maintainers are responsible for clarifying the standards of acceptable
37
+ behavior and are expected to take appropriate and fair corrective action in
38
+ response to any instances of unacceptable behavior.
39
+
40
+ Project maintainers have the right and responsibility to remove, edit, or
41
+ reject comments, commits, code, wiki edits, issues, and other contributions
42
+ that are not aligned to this Code of Conduct, or to ban temporarily or
43
+ permanently any contributor for other behaviors that they deem inappropriate,
44
+ threatening, offensive, or harmful.
45
+
46
+ ## Scope
47
+
48
+ This Code of Conduct applies within all project spaces, and it also applies when
49
+ an individual is representing the project or its community in public spaces.
50
+ Examples of representing a project or community include using an official
51
+ project e-mail address, posting via an official social media account, or acting
52
+ as an appointed representative at an online or offline event. Representation of
53
+ a project may be further defined and clarified by project maintainers.
54
+
55
+ This Code of Conduct also applies outside the project spaces when there is a
56
+ reasonable belief that an individual's behavior may have a negative impact on
57
+ the project or its community.
58
+
59
+ ## Enforcement
60
+
61
+ Instances of abusive, harassing, or otherwise unacceptable behavior may be
62
+ reported by contacting the project team at <[email protected]>. All
63
+ complaints will be reviewed and investigated and will result in a response that
64
+ is deemed necessary and appropriate to the circumstances. The project team is
65
+ obligated to maintain confidentiality with regard to the reporter of an incident.
66
+ Further details of specific enforcement policies may be posted separately.
67
+
68
+ Project maintainers who do not follow or enforce the Code of Conduct in good
69
+ faith may face temporary or permanent repercussions as determined by other
70
+ members of the project's leadership.
71
+
72
+ ## Attribution
73
+
74
+ This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
75
+ available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
76
+
77
+ [homepage]: https://www.contributor-covenant.org
78
+
79
+ For answers to common questions about this code of conduct, see
80
+ https://www.contributor-covenant.org/faq
static/facebookresearch_dinov2_main/CONTRIBUTING.md ADDED
@@ -0,0 +1,31 @@
+ # Contributing to DINOv2
+ We want to make contributing to this project as easy and transparent as
+ possible.
+
+ ## Pull Requests
+ We actively welcome your pull requests.
+
+ 1. Fork the repo and create your branch from `main`.
+ 2. If you've added code that should be tested, add tests.
+ 3. If you've changed APIs, update the documentation.
+ 4. Ensure the test suite passes.
+ 5. Make sure your code lints.
+ 6. If you haven't already, complete the Contributor License Agreement ("CLA").
+
+ ## Contributor License Agreement ("CLA")
+ In order to accept your pull request, we need you to submit a CLA. You only need
+ to do this once to work on any of Meta's open source projects.
+
+ Complete your CLA here: <https://code.facebook.com/cla>
+
+ ## Issues
+ We use GitHub issues to track public bugs. Please ensure your description is
+ clear and has sufficient instructions to be able to reproduce the issue.
+
+ Meta has a [bounty program](https://www.facebook.com/whitehat/) for the safe
+ disclosure of security bugs. In those cases, please go through the process
+ outlined on that page and do not file a public issue.
+
+ ## License
+ By contributing to DINOv2, you agree that your contributions will be licensed
+ under the LICENSE file in the root directory of this source tree.
static/facebookresearch_dinov2_main/LICENSE ADDED
@@ -0,0 +1,203 @@
1
+
2
+
3
+ Apache License
4
+ Version 2.0, January 2004
5
+ http://www.apache.org/licenses/
6
+
7
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
8
+
9
+ 1. Definitions.
10
+
11
+ "License" shall mean the terms and conditions for use, reproduction,
12
+ and distribution as defined by Sections 1 through 9 of this document.
13
+
14
+ "Licensor" shall mean the copyright owner or entity authorized by
15
+ the copyright owner that is granting the License.
16
+
17
+ "Legal Entity" shall mean the union of the acting entity and all
18
+ other entities that control, are controlled by, or are under common
19
+ control with that entity. For the purposes of this definition,
20
+ "control" means (i) the power, direct or indirect, to cause the
21
+ direction or management of such entity, whether by contract or
22
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
23
+ outstanding shares, or (iii) beneficial ownership of such entity.
24
+
25
+ "You" (or "Your") shall mean an individual or Legal Entity
26
+ exercising permissions granted by this License.
27
+
28
+ "Source" form shall mean the preferred form for making modifications,
29
+ including but not limited to software source code, documentation
30
+ source, and configuration files.
31
+
32
+ "Object" form shall mean any form resulting from mechanical
33
+ transformation or translation of a Source form, including but
34
+ not limited to compiled object code, generated documentation,
35
+ and conversions to other media types.
36
+
37
+ "Work" shall mean the work of authorship, whether in Source or
38
+ Object form, made available under the License, as indicated by a
39
+ copyright notice that is included in or attached to the work
40
+ (an example is provided in the Appendix below).
41
+
42
+ "Derivative Works" shall mean any work, whether in Source or Object
43
+ form, that is based on (or derived from) the Work and for which the
44
+ editorial revisions, annotations, elaborations, or other modifications
45
+ represent, as a whole, an original work of authorship. For the purposes
46
+ of this License, Derivative Works shall not include works that remain
47
+ separable from, or merely link (or bind by name) to the interfaces of,
48
+ the Work and Derivative Works thereof.
49
+
50
+ "Contribution" shall mean any work of authorship, including
51
+ the original version of the Work and any modifications or additions
52
+ to that Work or Derivative Works thereof, that is intentionally
53
+ submitted to Licensor for inclusion in the Work by the copyright owner
54
+ or by an individual or Legal Entity authorized to submit on behalf of
55
+ the copyright owner. For the purposes of this definition, "submitted"
56
+ means any form of electronic, verbal, or written communication sent
57
+ to the Licensor or its representatives, including but not limited to
58
+ communication on electronic mailing lists, source code control systems,
59
+ and issue tracking systems that are managed by, or on behalf of, the
60
+ Licensor for the purpose of discussing and improving the Work, but
61
+ excluding communication that is conspicuously marked or otherwise
62
+ designated in writing by the copyright owner as "Not a Contribution."
63
+
64
+ "Contributor" shall mean Licensor and any individual or Legal Entity
65
+ on behalf of whom a Contribution has been received by Licensor and
66
+ subsequently incorporated within the Work.
67
+
68
+ 2. Grant of Copyright License. Subject to the terms and conditions of
69
+ this License, each Contributor hereby grants to You a perpetual,
70
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
71
+ copyright license to reproduce, prepare Derivative Works of,
72
+ publicly display, publicly perform, sublicense, and distribute the
73
+ Work and such Derivative Works in Source or Object form.
74
+
75
+ 3. Grant of Patent License. Subject to the terms and conditions of
76
+ this License, each Contributor hereby grants to You a perpetual,
77
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
78
+ (except as stated in this section) patent license to make, have made,
79
+ use, offer to sell, sell, import, and otherwise transfer the Work,
80
+ where such license applies only to those patent claims licensable
81
+ by such Contributor that are necessarily infringed by their
82
+ Contribution(s) alone or by combination of their Contribution(s)
83
+ with the Work to which such Contribution(s) was submitted. If You
84
+ institute patent litigation against any entity (including a
85
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
86
+ or a Contribution incorporated within the Work constitutes direct
87
+ or contributory patent infringement, then any patent licenses
88
+ granted to You under this License for that Work shall terminate
89
+ as of the date such litigation is filed.
90
+
91
+ 4. Redistribution. You may reproduce and distribute copies of the
92
+ Work or Derivative Works thereof in any medium, with or without
93
+ modifications, and in Source or Object form, provided that You
94
+ meet the following conditions:
95
+
96
+ (a) You must give any other recipients of the Work or
97
+ Derivative Works a copy of this License; and
98
+
99
+ (b) You must cause any modified files to carry prominent notices
100
+ stating that You changed the files; and
101
+
102
+ (c) You must retain, in the Source form of any Derivative Works
103
+ that You distribute, all copyright, patent, trademark, and
104
+ attribution notices from the Source form of the Work,
105
+ excluding those notices that do not pertain to any part of
106
+ the Derivative Works; and
107
+
108
+ (d) If the Work includes a "NOTICE" text file as part of its
109
+ distribution, then any Derivative Works that You distribute must
110
+ include a readable copy of the attribution notices contained
111
+ within such NOTICE file, excluding those notices that do not
112
+ pertain to any part of the Derivative Works, in at least one
113
+ of the following places: within a NOTICE text file distributed
114
+ as part of the Derivative Works; within the Source form or
115
+ documentation, if provided along with the Derivative Works; or,
116
+ within a display generated by the Derivative Works, if and
117
+ wherever such third-party notices normally appear. The contents
118
+ of the NOTICE file are for informational purposes only and
119
+ do not modify the License. You may add Your own attribution
120
+ notices within Derivative Works that You distribute, alongside
121
+ or as an addendum to the NOTICE text from the Work, provided
122
+ that such additional attribution notices cannot be construed
123
+ as modifying the License.
124
+
125
+ You may add Your own copyright statement to Your modifications and
126
+ may provide additional or different license terms and conditions
127
+ for use, reproduction, or distribution of Your modifications, or
128
+ for any such Derivative Works as a whole, provided Your use,
129
+ reproduction, and distribution of the Work otherwise complies with
130
+ the conditions stated in this License.
131
+
132
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
133
+ any Contribution intentionally submitted for inclusion in the Work
134
+ by You to the Licensor shall be under the terms and conditions of
135
+ this License, without any additional terms or conditions.
136
+ Notwithstanding the above, nothing herein shall supersede or modify
137
+ the terms of any separate license agreement you may have executed
138
+ with Licensor regarding such Contributions.
139
+
140
+ 6. Trademarks. This License does not grant permission to use the trade
141
+ names, trademarks, service marks, or product names of the Licensor,
142
+ except as required for reasonable and customary use in describing the
143
+ origin of the Work and reproducing the content of the NOTICE file.
144
+
145
+ 7. Disclaimer of Warranty. Unless required by applicable law or
146
+ agreed to in writing, Licensor provides the Work (and each
147
+ Contributor provides its Contributions) on an "AS IS" BASIS,
148
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
149
+ implied, including, without limitation, any warranties or conditions
150
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
151
+ PARTICULAR PURPOSE. You are solely responsible for determining the
152
+ appropriateness of using or redistributing the Work and assume any
153
+ risks associated with Your exercise of permissions under this License.
154
+
155
+ 8. Limitation of Liability. In no event and under no legal theory,
156
+ whether in tort (including negligence), contract, or otherwise,
157
+ unless required by applicable law (such as deliberate and grossly
158
+ negligent acts) or agreed to in writing, shall any Contributor be
159
+ liable to You for damages, including any direct, indirect, special,
160
+ incidental, or consequential damages of any character arising as a
161
+ result of this License or out of the use or inability to use the
162
+ Work (including but not limited to damages for loss of goodwill,
163
+ work stoppage, computer failure or malfunction, or any and all
164
+ other commercial damages or losses), even if such Contributor
165
+ has been advised of the possibility of such damages.
166
+
167
+ 9. Accepting Warranty or Additional Liability. While redistributing
168
+ the Work or Derivative Works thereof, You may choose to offer,
169
+ and charge a fee for, acceptance of support, warranty, indemnity,
170
+ or other liability obligations and/or rights consistent with this
171
+ License. However, in accepting such obligations, You may act only
172
+ on Your own behalf and on Your sole responsibility, not on behalf
173
+ of any other Contributor, and only if You agree to indemnify,
174
+ defend, and hold each Contributor harmless for any liability
175
+ incurred by, or claims asserted against, such Contributor by reason
176
+ of your accepting any such warranty or additional liability.
177
+
178
+ END OF TERMS AND CONDITIONS
179
+
180
+ APPENDIX: How to apply the Apache License to your work.
181
+
182
+ To apply the Apache License to your work, attach the following
183
+ boilerplate notice, with the fields enclosed by brackets "[]"
184
+ replaced with your own identifying information. (Don't include
185
+ the brackets!) The text should be enclosed in the appropriate
186
+ comment syntax for the file format. We also recommend that a
187
+ file or class name and description of purpose be included on the
188
+ same "printed page" as the copyright notice for easier
189
+ identification within third-party archives.
190
+
191
+ Copyright [yyyy] [name of copyright owner]
192
+
193
+ Licensed under the Apache License, Version 2.0 (the "License");
194
+ you may not use this file except in compliance with the License.
195
+ You may obtain a copy of the License at
196
+
197
+ http://www.apache.org/licenses/LICENSE-2.0
198
+
199
+ Unless required by applicable law or agreed to in writing, software
200
+ distributed under the License is distributed on an "AS IS" BASIS,
201
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
202
+ See the License for the specific language governing permissions and
203
+ limitations under the License.
static/facebookresearch_dinov2_main/MODEL_CARD.md ADDED
@@ -0,0 +1,272 @@
1
+ # Model Card for DINOv2-S/B/L/g
2
+
3
+ These are Vision Transformer models trained following the method described in the papers:
4
+ "DINOv2: Learning Robust Visual Features without Supervision"
5
+ and
6
+ "Vision Transformers Need Registers".
7
+
8
+ We provide 8 models:
9
+ - 1 ViT-g trained from scratch with 3 ViT-S/B/L models distilled from the ViT-g, without registers.
10
+ - 1 ViT-g trained from scratch with 3 ViT-S/B/L models distilled from the ViT-g, with registers.
11
+
12
+ ## Model Details
13
+ The model takes an image as input and returns a class token and patch tokens, and optionally 4 register tokens.
14
+
15
+ The embedding dimension is:
16
+ - 384 for ViT-S.
17
+ - 768 for ViT-B.
18
+ - 1024 for ViT-L.
19
+ - 1536 for ViT-g.
20
+
21
+ The models follow a Transformer architecture, with a patch size of 14. In the case of registers, we add 4 register tokens, learned during training, to the input sequence after the patch embedding.
22
+
23
+ For a 224x224 image, this results in 1 class token + 256 patch tokens, and optionally 4 register tokens.
24
+
25
+ The models can accept larger images provided the image shapes are multiples of the patch size (14).
26
+ If this condition is not verified, the model will crop to the closest smaller multiple of the patch size.
27
+
28
+ ### Model Description
29
+
30
+ - **Developed by:** Meta AI
31
+ - **Model type:** Vision Transformer
32
+ - **License:** Apache License 2.0
33
+
34
+ - **Repository:** https://github.com/facebookresearch/dinov2
35
+ - **Paper:** https://arxiv.org/abs/2304.07193
36
+ - **Demo:** https://dinov2.metademolab.com/
37
+
38
+ ## Uses
39
+
40
+ The models are vision backbones providing multi-purpose features for downstream tasks.
41
+
42
+ ### Direct Use
43
+
44
+ The models can be used without fine-tuning, with downstream classifiers as simple as linear layers, to obtain competitive results:
45
+ - on depth estimation, semantic segmentation, using linear layers.
46
+ - on image classification, using k-NN classifiers on the class token.
47
+ - on image classification, with logistic regression classifiers applied on the class token.
48
+ - on image classification, with a linear layer applied on the class token and the average of the patch tokens.
49
+ - on image retrieval using nearest neighbors.
50
+
51
+ ### Downstream Use
52
+
53
+ It is technically possible to perform fine-tuning on the models, for small gains (we measured +2% on ImageNet-1k classification).
54
+ We recommend keeping this as a very last step and only when necessary, as the features already provide good performance out-of-the-box.
55
+
56
+ ## Bias, Risks, and Limitations
57
+
58
+ Despite improvements thanks to the training method not using annotations, we still observe significant biases in our models toward rich households from Western countries.
59
+
60
+ ### Recommendations
61
+
62
+ We expect fine-tuning will increase the biases in the features produced by the model as they will be tuned to the fine-tuning labels.
63
+
64
+ ## How to Get Started with the Model
65
+
66
+ Use the code below to get started with the model.
67
+
68
+ ```python
69
+ import torch
70
+
71
+ # DINOv2
72
+ dinov2_vits14 = torch.hub.load('facebookresearch/dinov2', 'dinov2_vits14')
73
+ dinov2_vitb14 = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitb14')
74
+ dinov2_vitl14 = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitl14')
75
+ dinov2_vitg14 = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitg14')
76
+
77
+ # DINOv2 with registers
78
+ dinov2_vits14_reg = torch.hub.load('facebookresearch/dinov2', 'dinov2_vits14_reg')
79
+ dinov2_vitb14_reg = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitb14_reg')
80
+ dinov2_vitl14_reg = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitl14_reg')
81
+ dinov2_vitg14_reg = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitg14_reg')
82
+ ```
83
+
84
+ ## Training Details
85
+
86
+ ### Training Data
87
+
88
+ - **Training data:** LVD-142M (see paper)
89
+ - **Training regime:** fp16 using PyTorch-FSDP mixed-precision.
90
+
91
+ ### Training Procedure
92
+
93
+ - **Training objective:**
94
+ - DINO self-distillation loss with multi-crop
95
+ - iBOT masked-image modeling loss
96
+ - KoLeo regularization on [CLS] tokens
97
+ - **Architectures:**
98
+ - ViT-S (21M params): Patch size 14, embedding dimension 384, 6 heads, MLP FFN
99
+ - ViT-B (86M params): Patch size 14, embedding dimension 768, 12 heads, MLP FFN
100
+ - ViT-L (0.3B params): Patch size 14, embedding dimension 1024, 16 heads, MLP FFN
101
+ - ViT-g (1.1B params): Patch size 14, embedding dimension 1536, 24 heads, SwiGLU FFN
102
+ - **Distillation:**
103
+ - Distillation follows the standard DINOv2 pretraining procedure, except the teacher is a pretrained ViT-g, frozen.
104
+
105
+ ## Evaluation
106
+
107
+ We refer users to the associated papers for the evaluation protocols.
108
+
109
+ <table>
110
+ <tr>
111
+ <th colspan="2"></th>
112
+ <th colspan="3">ImageNet-1k</th>
113
+ <th>NYU-Depth v2</th>
114
+ <th>SUN-RGBD</th>
115
+ <th>ADE20k</th>
116
+ <th>iNaturalist 2018</th>
117
+ <th>Oxford-H</th>
118
+ </tr>
119
+ <tr>
120
+ <th rowspan="2">model</th>
121
+ <th rowspan="2">with <br /> registers</th>
122
+ <th>classif. (acc)</th>
123
+ <th>classif. (acc)</th>
124
+ <th>classif. V2 (acc)</th>
125
+ <th>depth (RMSE)</th>
126
+ <th>depth (RMSE)</th>
127
+ <th>segm. (mAP)</th>
128
+ <th>classif. (acc)</th>
129
+ <th>retrieval (mAP)</th>
130
+ </tr>
131
+ <tr>
132
+ <!-- <th>^</th> -->
133
+ <th>k-NN</th>
134
+ <th>linear</th>
135
+ <th>linear</th>
136
+ <th>linear<br />4 layers</th>
137
+ <th>NYU-D transfer</th>
138
+ <th>multiscale</th>
139
+ <th>linear</th>
140
+ <th>nearest neighbor</th>
141
+ </tr>
142
+ <tr>
143
+ <td>ViT-S/14</td>
144
+ <td align="center">:x:</td>
145
+ <td align="right">79.0%</td>
146
+ <td align="right">81.1%</td>
147
+ <td align="right">70.8%</td>
148
+ <td align="right">0.417</td>
149
+ <td align="right">0.431</td>
150
+ <td align="right">47.2</td>
151
+ <td align="right">69.5%</td>
152
+ <td align="right">43.2</td>
153
+ </tr>
154
+ <tr>
155
+ <td>ViT-S/14</td>
156
+ <td align="center">:white_check_mark:</td>
157
+ <td align="right">79.1%</td>
158
+ <td align="right">80.9%</td>
159
+ <td align="right">71.0%</td>
160
+ <td align="right">N/A</td>
161
+ <td align="right">N/A</td>
162
+ <td align="right">N/A</td>
163
+ <td align="right">67.6%</td>
164
+ <td align="right">39.5</td>
165
+ </tr>
166
+ <tr>
167
+ <td>ViT-B/14</td>
168
+ <td align="center">:x:</td>
169
+ <td align="right">82.1%</td>
170
+ <td align="right">84.5%</td>
171
+ <td align="right">74.9%</td>
172
+ <td align="right">0.362</td>
173
+ <td align="right">0.400</td>
174
+ <td align="right">51.3</td>
175
+ <td align="right">76.3%</td>
176
+ <td align="right">49.5</td>
177
+ </tr>
178
+ <td>ViT-B/14</td>
179
+ <td align="center">:white_check_mark:</td>
180
+ <td align="right">82.0%</td>
181
+ <td align="right">84.6%</td>
182
+ <td align="right">75.6%</td>
183
+ <td align="right">N/A</td>
184
+ <td align="right">N/A</td>
185
+ <td align="right">N/A</td>
186
+ <td align="right">73.8%</td>
187
+ <td align="right">51.0</td>
188
+ </tr>
189
+ <tr>
190
+ <td>ViT-L/14</td>
191
+ <td align="center">:x:</td>
192
+ <td align="right">83.5%</td>
193
+ <td align="right">86.3%</td>
194
+ <td align="right">77.6%</td>
195
+ <td align="right">0.333</td>
196
+ <td align="right">0.396</td>
197
+ <td align="right">53.1</td>
198
+ <td align="right">79.8%</td>
199
+ <td align="right">54.0</td>
200
+ </tr>
201
+ <tr>
202
+ <td>ViT-L/14</td>
203
+ <td align="center">:white_check_mark:</td>
204
+ <td align="right">83.8%</td>
205
+ <td align="right">86.7%</td>
206
+ <td align="right">78.5%</td>
207
+ <td align="right">N/A</td>
208
+ <td align="right">N/A</td>
209
+ <td align="right">N/A</td>
210
+ <td align="right">80.9%</td>
211
+ <td align="right">55.7</td>
212
+ </tr>
213
+ <tr>
214
+ <td>ViT-g/14</td>
215
+ <td align="center">:x:</td>
216
+ <td align="right">83.5%</td>
217
+ <td align="right">86.5%</td>
218
+ <td align="right">78.4%</td>
219
+ <td align="right">0.298</td>
220
+ <td align="right">0.362</td>
221
+ <td align="right">53.0</td>
222
+ <td align="right">81.6%</td>
223
+ <td align="right">52.3</td>
224
+ </tr>
225
+ <tr>
226
+ <tr>
227
+ <td>ViT-g/14</td>
228
+ <td align="center">:white_check_mark:</td>
229
+ <td align="right">83.7%</td>
230
+ <td align="right">87.1%</td>
231
+ <td align="right">78.8%</td>
232
+ <td align="right">N/A</td>
233
+ <td align="right">N/A</td>
234
+ <td align="right">N/A</td>
235
+ <td align="right">81.5%</td>
236
+ <td align="right">58.2</td>
237
+ </tr>
238
+ </table>
239
+
240
+ ## Environmental Impact
241
+
242
+ - **Hardware Type:** Nvidia A100
243
+ - **Hours used:** 22,000 for ViT-g, 4,500 for ViT-S distillation, 5,300 for ViT-B distillation, 8,000 for ViT-L distillation
244
+ - **Cloud Provider:** Private infra
245
+ - **Compute Region:** USA
246
+ - **Carbon Emitted:** 7t CO2eq
247
+
248
+ #### Hardware
249
+
250
+ Nvidia A100 GPUs
251
+
252
+ #### Software
253
+
254
+ PyTorch 2.0,
255
+ xFormers 0.0.18
256
+
257
+ **BibTeX**
258
+
259
+ ```
260
+ @misc{oquab2023dinov2,
261
+ title={DINOv2: Learning Robust Visual Features without Supervision},
262
+ author={Oquab, Maxime and Darcet, Timothée and Moutakanni, Theo and Vo, Huy and Szafraniec, Marc and Khalidov, Vasil and Fernandez, Pierre and Haziza, Daniel and Massa, Francisco and El-Nouby, Alaaeldin and Howes, Russell and Huang, Po-Yao and Xu, Hu and Sharma, Vasu and Li, Shang-Wen and Galuba, Wojciech and Rabbat, Mike and Assran, Mido and Ballas, Nicolas and Synnaeve, Gabriel and Misra, Ishan and Jegou, Herve and Mairal, Julien and Labatut, Patrick and Joulin, Armand and Bojanowski, Piotr},
263
+ journal={arXiv:2304.07193},
264
+ year={2023}
265
+ }
266
+ @misc{darcet2023vitneedreg,
267
+ title={Vision Transformers Need Registers},
268
+ author={Darcet, Timothée and Oquab, Maxime and Mairal, Julien and Bojanowski, Piotr},
269
+ journal={arXiv:2309.16588},
270
+ year={2023}
271
+ }
272
+ ```
static/facebookresearch_dinov2_main/README.md ADDED
@@ -0,0 +1,620 @@
1
+ :new: [2023-10-26] *Added DINOv2 backbones with registers, following [Vision Transformers Need Registers](https://arxiv.org/abs/2309.16588).*
2
+
3
+ # DINOv2: Learning Robust Visual Features without Supervision
4
+
5
+ **[Meta AI Research, FAIR](https://ai.facebook.com/research/)**
6
+
7
+ Maxime Oquab,
8
+ Timothée Darcet,
9
+ Théo Moutakanni,
10
+ Huy V. Vo,
11
+ Marc Szafraniec,
12
+ Vasil Khalidov,
13
+ Patrick Labatut,
14
+ Armand Joulin,
15
+ Piotr Bojanowski
16
+
17
+ [[`Paper #1`](https://arxiv.org/abs/2304.07193)] [`Paper #2`](https://arxiv.org/abs/2309.16588)] [[`Blog`](https://ai.facebook.com/blog/dino-v2-computer-vision-self-supervised-learning/)] [[`Demo`](https://dinov2.metademolab.com)] [[`BibTeX`](#citing-dinov2)]
18
+
19
+ PyTorch implementation and pretrained models for DINOv2. For details, see the papers: **[DINOv2: Learning Robust Visual Features without Supervision](https://arxiv.org/abs/2304.07193)** and **[Vision Transformers Need Registers](https://arxiv.org/abs/2309.16588)**.
20
+
21
+ DINOv2 models produce high-performance visual features that can be directly employed with classifiers as simple as linear layers on a variety of computer vision tasks; these visual features are robust and perform well across domains without any requirement for fine-tuning. The models were pretrained on a dataset of 142 M images without using any labels or annotations.
22
+
23
+ https://github.com/facebookresearch/dinov2/assets/60359573/f168823e-7922-415a-b429-578badf5c356
24
+
25
+ <div align="center">
26
+ Visualization of the three first principal components of the patch features of all frames, mapped to RGB values.
27
+ </div>
28
+
29
+ ## Pretrained models
30
+
31
+ <table style="margin: auto">
32
+ <thead>
33
+ <tr>
34
+ <th>model</th>
35
+ <th># of<br />params</th>
36
+ <th>with<br />registers</th>
37
+ <th>ImageNet<br />k-NN</th>
38
+ <th>ImageNet<br />linear</th>
39
+ <th>download</th>
40
+ </tr>
41
+ </thead>
42
+ <tbody>
43
+ <tr>
44
+ <td>ViT-S/14 distilled</td>
45
+ <td align="right">21 M</td>
46
+ <td align="center">:x:</td>
47
+ <td align="right">79.0%</td>
48
+ <td align="right">81.1%</td>
49
+ <td><a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_pretrain.pth">backbone only</a></td>
50
+ </tr>
51
+ <tr>
52
+ <td>ViT-S/14 distilled</td>
53
+ <td align="right">21 M</td>
54
+ <td align="center">:white_check_mark:</td>
55
+ <td align="right">79.1%</td>
56
+ <td align="right">80.9%</td>
57
+ <td><a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_reg4_pretrain.pth">backbone only</a></td>
58
+ </tr>
59
+ <tr>
60
+ <td>ViT-B/14 distilled</td>
61
+ <td align="right">86 M</td>
62
+ <td align="center">:x:</td>
63
+ <td align="right">82.1%</td>
64
+ <td align="right">84.5%</td>
65
+ <td><a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_pretrain.pth">backbone only</a></td>
66
+ </tr>
67
+ <tr>
68
+ <td>ViT-B/14 distilled</td>
69
+ <td align="right">86 M</td>
70
+ <td align="center">:white_check_mark:</td>
71
+ <td align="right">82.0%</td>
72
+ <td align="right">84.6%</td>
73
+ <td><a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_reg4_pretrain.pth">backbone only</a></td>
74
+ </tr>
75
+ <tr>
76
+ <td>ViT-L/14 distilled</td>
77
+ <td align="right">300 M</td>
78
+ <td align="center">:x:</td>
79
+ <td align="right">83.5%</td>
80
+ <td align="right">86.3%</td>
81
+ <td><a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_pretrain.pth">backbone only</a></td>
82
+ </tr>
83
+ <tr>
84
+ <td>ViT-L/14 distilled</td>
85
+ <td align="right">300 M</td>
86
+ <td align="center">:white_check_mark:</td>
87
+ <td align="right">83.8%</td>
88
+ <td align="right">86.7%</td>
89
+ <td><a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_reg4_pretrain.pth">backbone only</a></td>
90
+ </tr>
91
+ <tr>
92
+ <td>ViT-g/14</td>
93
+ <td align="right">1,100 M</td>
94
+ <td align="center">:x:</td>
95
+ <td align="right">83.5%</td>
96
+ <td align="right">86.5%</td>
97
+ <td><a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_pretrain.pth">backbone only</a></td>
98
+ </tr>
99
+ <tr>
100
+ <td>ViT-g/14</td>
101
+ <td align="right">1,100 M</td>
102
+ <td align="center">:white_check_mark:</td>
103
+ <td align="right">83.7%</td>
104
+ <td align="right">87.1%</td>
105
+ <td><a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_reg4_pretrain.pth">backbone only</a></td>
106
+ </tr>
107
+ </tbody>
108
+ </table>
109
+
110
+ ### Pretrained backbones (via PyTorch Hub)
111
+
112
+ Please follow the instructions [here](https://pytorch.org/get-started/locally/) to install PyTorch (the only required dependency for loading the model). Installing PyTorch with CUDA support is strongly recommended.
113
+
114
+ A corresponding [model card](MODEL_CARD.md) is included in the repository.
115
+
116
+ ```python
117
+ import torch
118
+
119
+ # DINOv2
120
+ dinov2_vits14 = torch.hub.load('facebookresearch/dinov2', 'dinov2_vits14')
121
+ dinov2_vitb14 = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitb14')
122
+ dinov2_vitl14 = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitl14')
123
+ dinov2_vitg14 = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitg14')
124
+
125
+ # DINOv2 with registers
126
+ dinov2_vits14_reg = torch.hub.load('facebookresearch/dinov2', 'dinov2_vits14_reg')
127
+ dinov2_vitb14_reg = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitb14_reg')
128
+ dinov2_vitl14_reg = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitl14_reg')
129
+ dinov2_vitg14_reg = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitg14_reg')
130
+ ```
131
+
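+ As a quick sanity check, here is a minimal feature-extraction sketch (assuming a local image file `example.jpg`; the normalization constants are the standard ImageNet values used by the DINOv2 transforms). Calling the backbone directly returns the global image feature, e.g. a 384-dimensional vector for ViT-S/14:
+
+ ```python
+ import torch
+ from PIL import Image
+ from torchvision import transforms
+
+ # Standard ImageNet preprocessing; image sides must be multiples of the patch size (14).
+ preprocess = transforms.Compose([
+     transforms.Resize(256, interpolation=transforms.InterpolationMode.BICUBIC),
+     transforms.CenterCrop(224),
+     transforms.ToTensor(),
+     transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
+ ])
+
+ dinov2_vits14 = torch.hub.load('facebookresearch/dinov2', 'dinov2_vits14')
+ dinov2_vits14.eval()
+
+ image = preprocess(Image.open("example.jpg").convert("RGB")).unsqueeze(0)  # 1 x 3 x 224 x 224
+ with torch.no_grad():
+     features = dinov2_vits14(image)  # 1 x 384 global image feature for ViT-S/14
+ print(features.shape)
+ ```
+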
132
+ ### Pretrained heads - Image classification
133
+
134
+ <table style="margin: auto">
135
+ <thead>
136
+ <tr>
137
+ <th rowspan="2">backbone</th>
138
+ <th rowspan="2">with<br />registers</th>
139
+ <th>download</th>
140
+ </tr>
141
+ <tr>
142
+ <th>ImageNet</th>
143
+ </tr>
144
+ </thead>
145
+ <tbody>
146
+ <tr>
147
+ <td>ViT-S/14 distilled</td>
148
+ <td align="center">:x:</td>
149
+ <td>
150
+ linear head (<a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_linear_head.pth">1 layer</a>,
151
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_linear4_head.pth">4 layers</a>)
152
+ </td>
153
+ </tr>
154
+ <tr>
155
+ <td>ViT-S/14 distilled</td>
156
+ <td align="center">:white_check_mark:</td>
157
+ <td>
158
+ linear head (<a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_reg4_linear_head.pth">1 layer</a>,
159
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_reg4_linear4_head.pth">4 layers</a>)
160
+ </td>
161
+ </tr>
162
+ <tr>
163
+ <td>ViT-B/14 distilled</td>
164
+ <td align="center">:x:</td>
165
+ <td>
166
+ linear head (<a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_linear_head.pth">1 layer</a>,
167
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_linear4_head.pth">4 layers</a>)
168
+   </td>
+   </tr>
169
+ <tr>
170
+ <td>ViT-B/14 distilled</td>
171
+ <td align="center">:white_check_mark:</td>
172
+ <td>
173
+ linear head (<a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_reg4_linear_head.pth">1 layer</a>,
174
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_reg4_linear4_head.pth">4 layers</a>)
175
+   </td>
+   </tr>
176
+ <tr>
177
+ <td>ViT-L/14 distilled</td>
178
+ <td align="center">:x:</td>
179
+ <td>
180
+ linear head (<a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_linear_head.pth">1 layer</a>,
181
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_linear4_head.pth">4 layers</a>)
182
+   </td>
+   </tr>
183
+ <tr>
184
+ <td>ViT-L/14 distilled</td>
185
+ <td align="center">:white_check_mark:</td>
186
+ <td>
187
+ linear head (<a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_reg4_linear_head.pth">1 layer</a>,
188
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_reg4_linear4_head.pth">4 layers</a>)
189
+   </td>
+   </tr>
190
+ <tr>
191
+ <td>ViT-g/14</td>
192
+ <td align="center">:x:</td>
193
+ <td>
194
+ linear head (<a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_linear_head.pth">1 layer</a>,
195
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_linear4_head.pth">4 layers</a>)
196
+   </td>
+   </tr>
197
+ <tr>
198
+ <td>ViT-g/14</td>
199
+ <td align="center">:white_check_mark:</td>
200
+ <td>
201
+ linear head (<a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_reg4_linear_head.pth">1 layer</a>,
202
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_reg4_linear4_head.pth">4 layers</a>)
203
+   </td>
+   </tr>
204
+ </tbody>
205
+ </table>
206
+
207
+ The (full) classifier models can be loaded via PyTorch Hub:
208
+
209
+ ```python
210
+ import torch
211
+
212
+ # DINOv2
213
+ dinov2_vits14_lc = torch.hub.load('facebookresearch/dinov2', 'dinov2_vits14_lc')
214
+ dinov2_vitb14_lc = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitb14_lc')
215
+ dinov2_vitl14_lc = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitl14_lc')
216
+ dinov2_vitg14_lc = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitg14_lc')
217
+
218
+ # DINOv2 with registers
219
+ dinov2_vits14_reg_lc = torch.hub.load('facebookresearch/dinov2', 'dinov2_vits14_reg_lc')
220
+ dinov2_vitb14_reg_lc = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitb14_reg_lc')
221
+ dinov2_vitl14_reg_lc = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitl14_reg_lc')
222
+ dinov2_vitg14_reg_lc = torch.hub.load('facebookresearch/dinov2', 'dinov2_vitg14_reg_lc')
223
+ ```
224
+
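+ As an illustrative usage sketch (assuming the same preprocessing as in the backbone example above and a local image `example.jpg`), a loaded classifier maps a preprocessed image directly to ImageNet-1k logits:
+
+ ```python
+ import torch
+ from PIL import Image
+ from torchvision import transforms
+
+ preprocess = transforms.Compose([
+     transforms.Resize(256, interpolation=transforms.InterpolationMode.BICUBIC),
+     transforms.CenterCrop(224),
+     transforms.ToTensor(),
+     transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
+ ])
+
+ classifier = torch.hub.load('facebookresearch/dinov2', 'dinov2_vits14_lc')
+ classifier.eval()
+
+ image = preprocess(Image.open("example.jpg").convert("RGB")).unsqueeze(0)
+ with torch.no_grad():
+     logits = classifier(image)  # 1 x 1000 ImageNet-1k logits
+ predicted_class = logits.softmax(dim=-1).argmax(dim=-1)
+ print(int(predicted_class))  # predicted ImageNet-1k class index
+ ```
+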
225
+ ### Pretrained heads - Depth estimation
226
+
227
+ <table style="margin: auto">
228
+ <thead>
229
+ <tr>
230
+ <th rowspan="2">backbone</th>
231
+ <th colspan="2">download head</th>
232
+ </tr>
233
+ <tr>
234
+ <th>NYUd</th>
235
+ <th>KITTI</th>
236
+ </tr>
237
+ </thead>
238
+ <tbody>
239
+ <tr>
240
+ <td>ViT-S/14 distilled</td>
241
+ <td>
242
+ linear (<a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_nyu_linear_head.pth">1 layer</a>,
243
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_nyu_linear4_head.pth">4 layers</a>),
244
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_nyu_dpt_head.pth">DPT</a>
245
+ </td>
246
+ <td>
247
+ linear (<a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_kitti_linear_head.pth">1 layer</a>,
248
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_kitti_linear4_head.pth">4 layers</a>),
249
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_kitti_dpt_head.pth">DPT</a>
250
+ </td>
251
+ </tr>
252
+ <tr>
253
+ <td>ViT-B/14 distilled</td>
254
+ <td>
255
+ linear (<a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_nyu_linear_head.pth">1 layer</a>,
256
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_nyu_linear4_head.pth">4 layers</a>),
257
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_nyu_dpt_head.pth">DPT</a>
258
+ </td>
259
+ <td>
260
+ linear (<a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_kitti_linear_head.pth">1 layer</a>,
261
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_kitti_linear4_head.pth">4 layers</a>),
262
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_kitti_dpt_head.pth">DPT</a>
263
+ </td>
264
+ </tr>
265
+ <tr>
266
+ <td>ViT-L/14 distilled</td>
267
+ <td>
268
+ linear (<a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_nyu_linear_head.pth">1 layer</a>,
269
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_nyu_linear4_head.pth">4 layers</a>),
270
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_nyu_dpt_head.pth">DPT</a>
271
+ </td>
272
+ <td>
273
+ linear (<a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_kitti_linear_head.pth">1 layer</a>,
274
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_kitti_linear4_head.pth">4 layers</a>),
275
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_kitti_dpt_head.pth">DPT</a>
276
+ </td>
277
+ </tr>
278
+ <tr>
279
+ <td>ViT-g/14</td>
280
+ <td>
281
+ linear (<a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_nyu_linear_head.pth">1 layer</a>,
282
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_nyu_linear4_head.pth">4 layers</a>),
283
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_nyu_dpt_head.pth">DPT</a>
284
+ </td>
285
+ <td>
286
+ linear (<a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_kitti_linear_head.pth">1 layer</a>,
287
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_kitti_linear4_head.pth">4 layers</a>),
288
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_kitti_dpt_head.pth">DPT</a>
289
+ </td>
290
+ </tr>
291
+ </tbody>
292
+ </table>
293
+
294
+ ### Pretrained heads - Semantic segmentation
295
+
296
+ <table style="margin: auto">
297
+ <thead>
298
+ <tr>
299
+ <th rowspan="2">backbone</th>
300
+ <th>download model</th>
301
+ <th colspan="2">download head</th>
302
+ </tr>
303
+ <tr>
304
+ <th>ADE20K</th>
305
+ <th>ADE20K</th>
306
+ <th>VOC2012</th>
307
+ </tr>
308
+ </thead>
309
+ <tbody>
310
+ <tr>
311
+ <td>ViT-S/14 distilled</td>
312
+ <td></td>
313
+ <td>
314
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_ade20k_linear_head.pth">linear</a>,
315
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_ade20k_ms_head.pth">multi-scale</a>
316
+ </td>
317
+ <td>
318
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_voc2012_linear_head.pth">linear</a>,
319
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_voc2012_ms_head.pth">multi-scale</a>
320
+ </td>
321
+ </tr>
322
+ <tr>
323
+ <td>ViT-B/14 distilled</td>
324
+ <td></td>
325
+ <td>
326
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_ade20k_linear_head.pth">linear</a>,
327
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_ade20k_ms_head.pth">multi-scale</a>
328
+ </td>
329
+ <td>
330
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_voc2012_linear_head.pth">linear</a>,
331
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_voc2012_ms_head.pth">multi-scale</a>
332
+ </td>
333
+ </tr>
334
+ <tr>
335
+ <td>ViT-L/14 distilled</td>
336
+ <td></td>
337
+ <td>
338
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_ade20k_linear_head.pth">linear</a>,
339
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_ade20k_ms_head.pth">multi-scale</a>
340
+ </td>
341
+ <td>
342
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_voc2012_linear_head.pth">linear</a>,
343
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_voc2012_ms_head.pth">multi-scale</a>
344
+ </td>
345
+ </tr>
346
+ <tr>
347
+ <td>ViT-g/14</td>
348
+ <td>
349
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_ade20k_m2f.pth">Mask2Former</a>
350
+ </td>
351
+ <td>
352
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_ade20k_linear_head.pth">linear</a>,
353
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_ade20k_ms_head.pth">multi-scale</a>
354
+ </td>
355
+ <td>
356
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_voc2012_linear_head.pth">linear</a>,
357
+ <a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_voc2012_ms_head.pth">multi-scale</a>
358
+ </td>
359
+ </tr>
360
+ </tbody>
361
+ </table>
362
+
363
+ ## Installation
364
+
365
+ The training and evaluation code requires PyTorch 2.0 and [xFormers](https://github.com/facebookresearch/xformers) 0.0.18 as well as a number of other third-party packages. Note that the code has only been tested with the specified versions and also expects a Linux environment. To set up all the required dependencies for training and evaluation, please follow the instructions below:
366
+
367
+ *[conda](https://docs.conda.io/projects/conda/en/latest/user-guide/getting-started.html)* **(Recommended)** - Clone the repository and then create and activate a `dinov2` conda environment using the provided environment definition:
368
+
369
+ ```shell
370
+ conda env create -f conda.yaml
371
+ conda activate dinov2
372
+ ```
373
+
374
+ *[pip](https://pip.pypa.io/en/stable/getting-started/)* - Clone the repository and then use the provided `requirements.txt` to install the dependencies:
375
+
376
+ ```shell
377
+ pip install -r requirements.txt
378
+ ```
379
+
380
+ For dense tasks (depth estimation and semantic segmentation), there are additional dependencies (specific versions of `mmcv` and `mmsegmentation`) which are captured in the `extras` dependency specifications:
381
+
382
+ *[conda](https://docs.conda.io/projects/conda/en/latest/user-guide/getting-started.html)* **(Recommended)**:
383
+
384
+ ```shell
385
+ conda env create -f conda-extras.yaml
386
+ conda activate dinov2-extras
387
+ ```
388
+
389
+ *[pip](https://pip.pypa.io/en/stable/getting-started/)*:
390
+
391
+ ```shell
392
+ pip install -r requirements.txt -r requirements-extras.txt
393
+ ```
394
+
395
+ ## Data preparation
396
+
397
+ ### ImageNet-1k
398
+
399
+ The root directory of the dataset should hold the following contents:
400
+
401
+ - `<ROOT>/test/ILSVRC2012_test_00000001.JPEG`
402
+ - `<ROOT>/test/[..]`
403
+ - `<ROOT>/test/ILSVRC2012_test_00100000.JPEG`
404
+ - `<ROOT>/train/n01440764/n01440764_10026.JPEG`
405
+ - `<ROOT>/train/[...]`
406
+ - `<ROOT>/train/n15075141/n15075141_9993.JPEG`
407
+ - `<ROOT>/val/n01440764/ILSVRC2012_val_00000293.JPEG`
408
+ - `<ROOT>/val/[...]`
409
+ - `<ROOT>/val/n15075141/ILSVRC2012_val_00049174.JPEG`
410
+ - `<ROOT>/labels.txt`
411
+
412
+ The provided dataset implementation expects a few additional metadata files to be present under the extra directory:
413
+
414
+ - `<EXTRA>/class-ids-TRAIN.npy`
415
+ - `<EXTRA>/class-ids-VAL.npy`
416
+ - `<EXTRA>/class-names-TRAIN.npy`
417
+ - `<EXTRA>/class-names-VAL.npy`
418
+ - `<EXTRA>/entries-TEST.npy`
419
+ - `<EXTRA>/entries-TRAIN.npy`
420
+ - `<EXTRA>/entries-VAL.npy`
421
+
422
+ These metadata files can be generated (once) with the following lines of Python code:
423
+
424
+ ```python
425
+ from dinov2.data.datasets import ImageNet
426
+
427
+ for split in ImageNet.Split:
428
+ dataset = ImageNet(split=split, root="<ROOT>", extra="<EXTRA>")
429
+ dataset.dump_extra()
430
+ ```
431
+
432
+ Note that the root and extra directories do not have to be distinct directories.
433
+
434
+ ### ImageNet-22k
435
+
436
+ Please adapt the [dataset class](dinov2/data/datasets/image_net_22k.py) to match your local setup.
437
+
438
+ <br />
439
+
440
+ :warning: To execute the commands provided in the next sections for training and evaluation, the `dinov2` package should be included in the Python module search path, i.e. simply prefix the command to run with `PYTHONPATH=.`.
441
+
442
+ ## Training
443
+
444
+ ### Fast setup: training DINOv2 ViT-L/16 on ImageNet-1k
445
+
446
+ Run DINOv2 training on 4 A100-80GB nodes (32 GPUs) in a SLURM cluster environment with submitit:
447
+
448
+ ```shell
449
+ python dinov2/run/train/train.py \
450
+ --nodes 4 \
451
+ --config-file dinov2/configs/train/vitl16_short.yaml \
452
+ --output-dir <PATH/TO/OUTPUT/DIR> \
453
+ train.dataset_path=ImageNet:split=TRAIN:root=<PATH/TO/DATASET>:extra=<PATH/TO/DATASET>
454
+ ```
455
+
456
+ Training time is approximately 1 day and the resulting checkpoint should reach 81.6% on k-NN eval and 82.9% on linear eval.
457
+
458
+ The training code saves the weights of the teacher in the `eval` folder every 12500 iterations for evaluation.
459
+
460
+ ### Long setup: training DINOv2 ViT-L/14 on ImageNet-22k
461
+
462
+ Run DINOv2 training on 12 A100-80GB nodes (96 GPUs) in a SLURM cluster environment with submitit:
463
+
464
+ ```shell
465
+ python dinov2/run/train/train.py \
466
+ --nodes 12 \
467
+ --config-file dinov2/configs/train/vitl14.yaml \
468
+ --output-dir <PATH/TO/OUTPUT/DIR> \
469
+ train.dataset_path=ImageNet22k:root=<PATH/TO/DATASET>:extra=<PATH/TO/DATASET>
470
+ ```
471
+
472
+ Training time is approximately 3.3 days and the resulting checkpoint should reach 82.0% on k-NN eval and 84.5% on linear eval.
473
+
474
+ The training code saves the weights of the teacher in the `eval` folder every 12500 iterations for evaluation.
475
+
476
+
477
+ ## Evaluation
478
+
479
+ The training code regularly saves the teacher weights. In order to evaluate the model, run the following evaluation on a single node:
480
+
481
+ ### k-NN classification on ImageNet-1k
482
+
483
+ ```shell
484
+ python dinov2/run/eval/knn.py \
485
+ --config-file <PATH/TO/OUTPUT/DIR>/config.yaml \
486
+ --pretrained-weights <PATH/TO/OUTPUT/DIR>/eval/training_24999/teacher_checkpoint.pth \
487
+ --output-dir <PATH/TO/OUTPUT/DIR>/eval/training_24999/knn \
488
+ --train-dataset ImageNet:split=TRAIN:root=<PATH/TO/DATASET>:extra=<PATH/TO/DATASET> \
489
+ --val-dataset ImageNet:split=VAL:root=<PATH/TO/DATASET>:extra=<PATH/TO/DATASET>
490
+ ```
491
+
492
+ ### Logistic regression classification on ImageNet-1k
493
+
494
+ ```shell
495
+ python dinov2/run/eval/log_regression.py \
496
+ --config-file <PATH/TO/OUTPUT/DIR>/config.yaml \
497
+ --pretrained-weights <PATH/TO/OUTPUT/DIR>/eval/training_24999/teacher_checkpoint.pth \
498
+ --output-dir <PATH/TO/OUTPUT/DIR>/eval/training_24999/logreg \
499
+ --train-dataset ImageNet:split=TRAIN:root=<PATH/TO/DATASET>:extra=<PATH/TO/DATASET> \
500
+ --val-dataset ImageNet:split=VAL:root=<PATH/TO/DATASET>:extra=<PATH/TO/DATASET>
501
+ ```
502
+
503
+ ### Linear classification with data augmentation on ImageNet-1k
504
+
505
+ ```shell
506
+ python dinov2/run/eval/linear.py \
507
+ --config-file <PATH/TO/OUTPUT/DIR>/config.yaml \
508
+ --pretrained-weights <PATH/TO/OUTPUT/DIR>/eval/training_24999/teacher_checkpoint.pth \
509
+ --output-dir <PATH/TO/OUTPUT/DIR>/eval/training_24999/linear \
510
+ --train-dataset ImageNet:split=TRAIN:root=<PATH/TO/DATASET>:extra=<PATH/TO/DATASET> \
511
+ --val-dataset ImageNet:split=VAL:root=<PATH/TO/DATASET>:extra=<PATH/TO/DATASET>
512
+ ```
513
+
514
+ We release the weights from evaluating the different models:
515
+
516
+ <table style="margin: auto">
517
+ <tr>
518
+ <th>model</th>
519
+ <th>with<br />registers</th>
520
+ <th>ImageNet<br />top-1</th>
521
+ <th>linear evaluation</th>
522
+ </tr>
523
+ <tr>
524
+ <td>ViT-S/14 distilled</td>
525
+ <td align="center">:x:</td>
526
+ <td align="right">81.1%</td>
527
+ <td><a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_linear_head.pth">linear head weights</a></td>
528
+ </tr>
529
+ <tr>
530
+ <td>ViT-S/14 distilled</td>
531
+ <td align="center">:white_check_mark:</td>
532
+ <td align="right">80.8%</td>
533
+ <td><a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_reg4_linear_head.pth">linear head weights</a></td>
534
+ </tr>
535
+ <tr>
536
+ <td>ViT-B/14 distilled</td>
537
+ <td align="center">:x:</td>
538
+ <td align="right">84.5%</td>
539
+ <td><a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_linear_head.pth">linear head weights</a></td>
540
+ </tr>
541
+ <tr>
542
+ <td>ViT-B/14 distilled</td>
543
+ <td align="center">:white_check_mark:</td>
544
+ <td align="right">84.4%</td>
545
+ <td><a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_reg4_linear_head.pth">linear head weights</a></td>
546
+ </tr>
547
+ <tr>
548
+ <td>ViT-L/14 distilled</td>
549
+ <td align="center">:x:</td>
550
+ <td align="right">86.3%</td>
551
+ <td><a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_linear_head.pth">linear head weights</a></td>
552
+ </tr>
553
+ <tr>
554
+ <td>ViT-L/14 distilled</td>
555
+ <td align="center">:white_check_mark:</td>
556
+ <td align="right">86.5%</td>
557
+ <td><a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_reg4_linear_head.pth">linear head weights</a></td>
558
+ </tr>
559
+ <tr>
560
+ <td>ViT-g/14</td>
561
+ <td align="center">:x:</td>
562
+ <td align="right">86.5%</td>
563
+ <td><a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_linear_head.pth">linear head weights</a></td>
564
+ </tr>
565
+ <tr>
566
+ <td>ViT-g/14</td>
567
+ <td align="center">:white_check_mark:</td>
568
+ <td align="right">87.0%</td>
569
+ <td><a href="https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_reg4_linear_head.pth">linear head weights</a></td>
570
+ </tr>
571
+ </table>
572
+
573
+ The performance of the provided pretrained model weights can be evaluated as follows on ImageNet-1k:
574
+
575
+ ```shell
576
+ python dinov2/run/eval/linear.py \
577
+ --config-file dinov2/configs/eval/vitg14_pretrain.yaml \
578
+ --pretrained-weights https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_pretrain.pth \
579
+ --train-dataset ImageNet:split=TRAIN:root=<PATH/TO/DATASET>:extra=<PATH/TO/DATASET> \
580
+ --val-dataset ImageNet:split=VAL:root=<PATH/TO/DATASET>:extra=<PATH/TO/DATASET>
581
+ ```
582
+
583
+ ## Notebooks
584
+
585
+ A few notebooks are provided to help the community leverage the models and code:
586
+
587
+ <ul>
588
+ <li><a href="https://github.com/facebookresearch/dinov2/blob/main/notebooks/depth_estimation.ipynb">Depth estimation</a> - How to load and use the depth heads in combination with a matching backbone via mmcv</li>
589
+ <li><a href="https://github.com/facebookresearch/dinov2/blob/main/notebooks/semantic_segmentation.ipynb">Semantic segmentation</a> - How to load and use the segmentation heads in combination with a matching backbone via mmcv, and also how to load and use the Mask2Former-based segmentation model trained on ADE20K</li>
590
+ </ul>
591
+
592
+ ## License
593
+
594
+ DINOv2 code and model weights are released under the Apache License 2.0. See [LICENSE](LICENSE) for additional details.
595
+
596
+ ## Contributing
597
+
598
+ See [contributing](CONTRIBUTING.md) and the [code of conduct](CODE_OF_CONDUCT.md).
599
+
600
+ ## Citing DINOv2
601
+
602
+ If you find this repository useful, please consider giving a star :star: and citation :t-rex::
603
+
604
+ ```
605
+ @misc{oquab2023dinov2,
606
+ title={DINOv2: Learning Robust Visual Features without Supervision},
607
+ author={Oquab, Maxime and Darcet, Timothée and Moutakanni, Theo and Vo, Huy V. and Szafraniec, Marc and Khalidov, Vasil and Fernandez, Pierre and Haziza, Daniel and Massa, Francisco and El-Nouby, Alaaeldin and Howes, Russell and Huang, Po-Yao and Xu, Hu and Sharma, Vasu and Li, Shang-Wen and Galuba, Wojciech and Rabbat, Mike and Assran, Mido and Ballas, Nicolas and Synnaeve, Gabriel and Misra, Ishan and Jegou, Herve and Mairal, Julien and Labatut, Patrick and Joulin, Armand and Bojanowski, Piotr},
608
+ journal={arXiv:2304.07193},
609
+ year={2023}
610
+ }
611
+ ```
612
+
613
+ ```
614
+ @misc{darcet2023vitneedreg,
615
+ title={Vision Transformers Need Registers},
616
+ author={Darcet, Timothée and Oquab, Maxime and Mairal, Julien and Bojanowski, Piotr},
617
+ journal={arXiv:2309.16588},
618
+ year={2023}
619
+ }
620
+ ```
static/facebookresearch_dinov2_main/conda-extras.yaml ADDED
@@ -0,0 +1,24 @@
1
+ name: dinov2-extras
2
+ channels:
3
+ - defaults
4
+ - pytorch
5
+ - nvidia
6
+ - xformers
7
+ - conda-forge
8
+ dependencies:
9
+ - python=3.9
10
+ - pytorch::pytorch=2.0.0
11
+ - pytorch::pytorch-cuda=11.7.0
12
+ - pytorch::torchvision=0.15.0
13
+ - omegaconf
14
+ - torchmetrics=0.10.3
15
+ - fvcore
16
+ - iopath
17
+ - xformers::xformers=0.0.18
18
+ - pip
19
+ - pip:
20
+ - git+https://github.com/facebookincubator/submitit
21
+ - --extra-index-url https://pypi.nvidia.com
22
+ - cuml-cu11
23
+ - mmcv-full==1.5.0
24
+ - mmsegmentation==0.27.0
static/facebookresearch_dinov2_main/conda.yaml ADDED
@@ -0,0 +1,22 @@
1
+ name: dinov2
2
+ channels:
3
+ - defaults
4
+ - pytorch
5
+ - nvidia
6
+ - xformers
7
+ - conda-forge
8
+ dependencies:
9
+ - python=3.9
10
+ - pytorch::pytorch=2.0.0
11
+ - pytorch::pytorch-cuda=11.7.0
12
+ - pytorch::torchvision=0.15.0
13
+ - omegaconf
14
+ - torchmetrics=0.10.3
15
+ - fvcore
16
+ - iopath
17
+ - xformers::xformers=0.0.18
18
+ - pip
19
+ - pip:
20
+ - git+https://github.com/facebookincubator/submitit
21
+ - --extra-index-url https://pypi.nvidia.com
22
+ - cuml-cu11
static/facebookresearch_dinov2_main/dinov2/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ #
3
+ # This source code is licensed under the Apache License, Version 2.0
4
+ # found in the LICENSE file in the root directory of this source tree.
5
+
6
+ __version__ = "0.0.1"
static/facebookresearch_dinov2_main/dinov2/configs/__init__.py ADDED
@@ -0,0 +1,22 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ #
3
+ # This source code is licensed under the Apache License, Version 2.0
4
+ # found in the LICENSE file in the root directory of this source tree.
5
+
6
+ import pathlib
7
+
8
+ from omegaconf import OmegaConf
9
+
10
+
11
+ def load_config(config_name: str):
12
+ config_filename = config_name + ".yaml"
13
+ return OmegaConf.load(pathlib.Path(__file__).parent.resolve() / config_filename)
14
+
15
+
16
+ dinov2_default_config = load_config("ssl_default_config")
17
+
18
+
19
+ def load_and_merge_config(config_name: str):
20
+ default_config = OmegaConf.create(dinov2_default_config)
21
+ loaded_config = load_config(config_name)
22
+ return OmegaConf.merge(default_config, loaded_config)
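+
+ # Example usage (illustrative): merge the defaults with one of the bundled eval configs,
+ # e.g. config = load_and_merge_config("eval/vitl14_pretrain")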
static/facebookresearch_dinov2_main/dinov2/configs/eval/vitb14_pretrain.yaml ADDED
@@ -0,0 +1,6 @@
1
+ student:
2
+ arch: vit_base
3
+ patch_size: 14
4
+ crops:
5
+ global_crops_size: 518 # this is to set up the position embeddings properly
6
+ local_crops_size: 98
static/facebookresearch_dinov2_main/dinov2/configs/eval/vitb14_reg4_pretrain.yaml ADDED
@@ -0,0 +1,9 @@
1
+ student:
2
+ arch: vit_base
3
+ patch_size: 14
4
+ num_register_tokens: 4
5
+ interpolate_antialias: true
6
+ interpolate_offset: 0.0
7
+ crops:
8
+ global_crops_size: 518 # this is to set up the position embeddings properly
9
+ local_crops_size: 98
static/facebookresearch_dinov2_main/dinov2/configs/eval/vitg14_pretrain.yaml ADDED
@@ -0,0 +1,7 @@
1
+ student:
2
+ arch: vit_giant2
3
+ patch_size: 14
4
+ ffn_layer: swiglufused
5
+ crops:
6
+ global_crops_size: 518 # this is to set up the position embeddings properly
7
+ local_crops_size: 98
static/facebookresearch_dinov2_main/dinov2/configs/eval/vitg14_reg4_pretrain.yaml ADDED
@@ -0,0 +1,10 @@
1
+ student:
2
+ arch: vit_giant2
3
+ patch_size: 14
4
+ ffn_layer: swiglufused
5
+ num_register_tokens: 4
6
+ interpolate_antialias: true
7
+ interpolate_offset: 0.0
8
+ crops:
9
+ global_crops_size: 518 # this is to set up the position embeddings properly
10
+ local_crops_size: 98
static/facebookresearch_dinov2_main/dinov2/configs/eval/vitl14_pretrain.yaml ADDED
@@ -0,0 +1,6 @@
1
+ student:
2
+ arch: vit_large
3
+ patch_size: 14
4
+ crops:
5
+ global_crops_size: 518 # this is to set up the position embeddings properly
6
+ local_crops_size: 98
static/facebookresearch_dinov2_main/dinov2/configs/eval/vitl14_reg4_pretrain.yaml ADDED
@@ -0,0 +1,9 @@
1
+ student:
2
+ arch: vit_large
3
+ patch_size: 14
4
+ num_register_tokens: 4
5
+ interpolate_antialias: true
6
+ interpolate_offset: 0.0
7
+ crops:
8
+ global_crops_size: 518 # this is to set up the position embeddings properly
9
+ local_crops_size: 98
static/facebookresearch_dinov2_main/dinov2/configs/eval/vits14_pretrain.yaml ADDED
@@ -0,0 +1,6 @@
1
+ student:
2
+ arch: vit_small
3
+ patch_size: 14
4
+ crops:
5
+ global_crops_size: 518 # this is to set up the position embeddings properly
6
+ local_crops_size: 98
static/facebookresearch_dinov2_main/dinov2/configs/eval/vits14_reg4_pretrain.yaml ADDED
@@ -0,0 +1,9 @@
1
+ student:
2
+ arch: vit_small
3
+ patch_size: 14
4
+ num_register_tokens: 4
5
+ interpolate_antialias: true
6
+ interpolate_offset: 0.0
7
+ crops:
8
+ global_crops_size: 518 # this is to set up the position embeddings properly
9
+ local_crops_size: 98
static/facebookresearch_dinov2_main/dinov2/configs/ssl_default_config.yaml ADDED
@@ -0,0 +1,118 @@
1
+ MODEL:
2
+ WEIGHTS: ''
3
+ compute_precision:
4
+ grad_scaler: true
5
+ teacher:
6
+ backbone:
7
+ sharding_strategy: SHARD_GRAD_OP
8
+ mixed_precision:
9
+ param_dtype: fp16
10
+ reduce_dtype: fp16
11
+ buffer_dtype: fp32
12
+ dino_head:
13
+ sharding_strategy: SHARD_GRAD_OP
14
+ mixed_precision:
15
+ param_dtype: fp16
16
+ reduce_dtype: fp16
17
+ buffer_dtype: fp32
18
+ ibot_head:
19
+ sharding_strategy: SHARD_GRAD_OP
20
+ mixed_precision:
21
+ param_dtype: fp16
22
+ reduce_dtype: fp16
23
+ buffer_dtype: fp32
24
+ student:
25
+ backbone:
26
+ sharding_strategy: SHARD_GRAD_OP
27
+ mixed_precision:
28
+ param_dtype: fp16
29
+ reduce_dtype: fp16
30
+ buffer_dtype: fp32
31
+ dino_head:
32
+ sharding_strategy: SHARD_GRAD_OP
33
+ mixed_precision:
34
+ param_dtype: fp16
35
+ reduce_dtype: fp32
36
+ buffer_dtype: fp32
37
+ ibot_head:
38
+ sharding_strategy: SHARD_GRAD_OP
39
+ mixed_precision:
40
+ param_dtype: fp16
41
+ reduce_dtype: fp32
42
+ buffer_dtype: fp32
43
+ dino:
44
+ loss_weight: 1.0
45
+ head_n_prototypes: 65536
46
+ head_bottleneck_dim: 256
47
+ head_nlayers: 3
48
+ head_hidden_dim: 2048
49
+ koleo_loss_weight: 0.1
50
+ ibot:
51
+ loss_weight: 1.0
52
+ mask_sample_probability: 0.5
53
+ mask_ratio_min_max:
54
+ - 0.1
55
+ - 0.5
56
+ separate_head: false
57
+ head_n_prototypes: 65536
58
+ head_bottleneck_dim: 256
59
+ head_nlayers: 3
60
+ head_hidden_dim: 2048
61
+ train:
62
+ batch_size_per_gpu: 64
63
+ dataset_path: ImageNet:split=TRAIN
64
+ output_dir: .
65
+ saveckp_freq: 20
66
+ seed: 0
67
+ num_workers: 10
68
+ OFFICIAL_EPOCH_LENGTH: 1250
69
+ cache_dataset: true
70
+ centering: "centering" # or "sinkhorn_knopp"
71
+ student:
72
+ arch: vit_large
73
+ patch_size: 16
74
+ drop_path_rate: 0.3
75
+ layerscale: 1.0e-05
76
+ drop_path_uniform: true
77
+ pretrained_weights: ''
78
+ ffn_layer: "mlp"
79
+ block_chunks: 0
80
+ qkv_bias: true
81
+ proj_bias: true
82
+ ffn_bias: true
83
+ num_register_tokens: 0
84
+ interpolate_antialias: false
85
+ interpolate_offset: 0.1
86
+ teacher:
87
+ momentum_teacher: 0.992
88
+ final_momentum_teacher: 1
89
+ warmup_teacher_temp: 0.04
90
+ teacher_temp: 0.07
91
+ warmup_teacher_temp_epochs: 30
92
+ optim:
93
+ epochs: 100
94
+ weight_decay: 0.04
95
+ weight_decay_end: 0.4
96
+ base_lr: 0.004 # learning rate for a batch size of 1024
97
+ lr: 0. # will be set after applying scaling rule
98
+ warmup_epochs: 10
99
+ min_lr: 1.0e-06
100
+ clip_grad: 3.0
101
+ freeze_last_layer_epochs: 1
102
+ scaling_rule: sqrt_wrt_1024
103
+ patch_embed_lr_mult: 0.2
104
+ layerwise_decay: 0.9
105
+ adamw_beta1: 0.9
106
+ adamw_beta2: 0.999
107
+ crops:
108
+ global_crops_scale:
109
+ - 0.32
110
+ - 1.0
111
+ local_crops_number: 8
112
+ local_crops_scale:
113
+ - 0.05
114
+ - 0.32
115
+ global_crops_size: 224
116
+ local_crops_size: 96
117
+ evaluation:
118
+ eval_period_iterations: 12500
static/facebookresearch_dinov2_main/dinov2/configs/train/vitg14.yaml ADDED
@@ -0,0 +1,26 @@
1
+ dino:
2
+ head_n_prototypes: 131072
3
+ head_bottleneck_dim: 384
4
+ ibot:
5
+ separate_head: true
6
+ head_n_prototypes: 131072
7
+ train:
8
+ batch_size_per_gpu: 12
9
+ dataset_path: ImageNet22k
10
+ centering: sinkhorn_knopp
11
+ student:
12
+ arch: vit_giant2
13
+ patch_size: 14
14
+ drop_path_rate: 0.4
15
+ ffn_layer: swiglufused
16
+ block_chunks: 4
17
+ teacher:
18
+ momentum_teacher: 0.994
19
+ optim:
20
+ epochs: 500
21
+ weight_decay_end: 0.2
22
+ base_lr: 2.0e-04 # learning rate for a batch size of 1024
23
+ warmup_epochs: 80
24
+ layerwise_decay: 1.0
25
+ crops:
26
+ local_crops_size: 98
static/facebookresearch_dinov2_main/dinov2/configs/train/vitl14.yaml ADDED
@@ -0,0 +1,26 @@
1
+ dino:
2
+ head_n_prototypes: 131072
3
+ head_bottleneck_dim: 384
4
+ ibot:
5
+ separate_head: true
6
+ head_n_prototypes: 131072
7
+ train:
8
+ batch_size_per_gpu: 32
9
+ dataset_path: ImageNet22k
10
+ centering: sinkhorn_knopp
11
+ student:
12
+ arch: vit_large
13
+ patch_size: 14
14
+ drop_path_rate: 0.4
15
+ ffn_layer: swiglufused
16
+ block_chunks: 4
17
+ teacher:
18
+ momentum_teacher: 0.994
19
+ optim:
20
+ epochs: 500
21
+ weight_decay_end: 0.2
22
+ base_lr: 2.0e-04 # learning rate for a batch size of 1024
23
+ warmup_epochs: 80
24
+ layerwise_decay: 1.0
25
+ crops:
26
+ local_crops_size: 98
static/facebookresearch_dinov2_main/dinov2/configs/train/vitl16_short.yaml ADDED
@@ -0,0 +1,6 @@
1
+ # this corresponds to the default config
2
+ train:
3
+ dataset_path: ImageNet:split=TRAIN
4
+ batch_size_per_gpu: 64
5
+ student:
6
+ block_chunks: 4
static/facebookresearch_dinov2_main/dinov2/data/__init__.py ADDED
@@ -0,0 +1,10 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ #
3
+ # This source code is licensed under the Apache License, Version 2.0
4
+ # found in the LICENSE file in the root directory of this source tree.
5
+
6
+ from .adapters import DatasetWithEnumeratedTargets
7
+ from .loaders import make_data_loader, make_dataset, SamplerType
8
+ from .collate import collate_data_and_cast
9
+ from .masking import MaskingGenerator
10
+ from .augmentations import DataAugmentationDINO
static/facebookresearch_dinov2_main/dinov2/data/adapters.py ADDED
@@ -0,0 +1,28 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ #
3
+ # This source code is licensed under the Apache License, Version 2.0
4
+ # found in the LICENSE file in the root directory of this source tree.
5
+
6
+ from typing import Any, Tuple
7
+
8
+ from torch.utils.data import Dataset
9
+
10
+
11
+ class DatasetWithEnumeratedTargets(Dataset):
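+ # A thin wrapper that returns each sample's target together with its dataset index,
+ # so downstream code can keep track of which sample produced which prediction.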
12
+ def __init__(self, dataset):
13
+ self._dataset = dataset
14
+
15
+ def get_image_data(self, index: int) -> bytes:
16
+ return self._dataset.get_image_data(index)
17
+
18
+ def get_target(self, index: int) -> Tuple[Any, int]:
19
+ target = self._dataset.get_target(index)
20
+ return (index, target)
21
+
22
+ def __getitem__(self, index: int) -> Tuple[Any, Tuple[Any, int]]:
23
+ image, target = self._dataset[index]
24
+ target = index if target is None else target
25
+ return image, (index, target)
26
+
27
+ def __len__(self) -> int:
28
+ return len(self._dataset)
static/facebookresearch_dinov2_main/dinov2/data/augmentations.py ADDED
@@ -0,0 +1,118 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ #
3
+ # This source code is licensed under the Apache License, Version 2.0
4
+ # found in the LICENSE file in the root directory of this source tree.
5
+
6
+ import logging
7
+
8
+ from torchvision import transforms
9
+
10
+ from .transforms import (
11
+ GaussianBlur,
12
+ make_normalize_transform,
13
+ )
14
+
15
+
16
+ logger = logging.getLogger("dinov2")
17
+
18
+
19
+ class DataAugmentationDINO(object):
20
+ def __init__(
21
+ self,
22
+ global_crops_scale,
23
+ local_crops_scale,
24
+ local_crops_number,
25
+ global_crops_size=224,
26
+ local_crops_size=96,
27
+ ):
28
+ self.global_crops_scale = global_crops_scale
29
+ self.local_crops_scale = local_crops_scale
30
+ self.local_crops_number = local_crops_number
31
+ self.global_crops_size = global_crops_size
32
+ self.local_crops_size = local_crops_size
33
+
34
+ logger.info("###################################")
35
+ logger.info("Using data augmentation parameters:")
36
+ logger.info(f"global_crops_scale: {global_crops_scale}")
37
+ logger.info(f"local_crops_scale: {local_crops_scale}")
38
+ logger.info(f"local_crops_number: {local_crops_number}")
39
+ logger.info(f"global_crops_size: {global_crops_size}")
40
+ logger.info(f"local_crops_size: {local_crops_size}")
41
+ logger.info("###################################")
42
+
43
+ # random resized crop and flip
44
+ self.geometric_augmentation_global = transforms.Compose(
45
+ [
46
+ transforms.RandomResizedCrop(
47
+ global_crops_size, scale=global_crops_scale, interpolation=transforms.InterpolationMode.BICUBIC
48
+ ),
49
+ transforms.RandomHorizontalFlip(p=0.5),
50
+ ]
51
+ )
52
+
53
+ self.geometric_augmentation_local = transforms.Compose(
54
+ [
55
+ transforms.RandomResizedCrop(
56
+ local_crops_size, scale=local_crops_scale, interpolation=transforms.InterpolationMode.BICUBIC
57
+ ),
58
+ transforms.RandomHorizontalFlip(p=0.5),
59
+ ]
60
+ )
61
+
62
+ # color distorsions / blurring
63
+ color_jittering = transforms.Compose(
64
+ [
65
+ transforms.RandomApply(
66
+ [transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.2, hue=0.1)],
67
+ p=0.8,
68
+ ),
69
+ transforms.RandomGrayscale(p=0.2),
70
+ ]
71
+ )
72
+
73
+ global_transfo1_extra = GaussianBlur(p=1.0)
74
+
75
+ global_transfo2_extra = transforms.Compose(
76
+ [
77
+ GaussianBlur(p=0.1),
78
+ transforms.RandomSolarize(threshold=128, p=0.2),
79
+ ]
80
+ )
81
+
82
+ local_transfo_extra = GaussianBlur(p=0.5)
83
+
84
+ # normalization
85
+ self.normalize = transforms.Compose(
86
+ [
87
+ transforms.ToTensor(),
88
+ make_normalize_transform(),
89
+ ]
90
+ )
91
+
92
+ self.global_transfo1 = transforms.Compose([color_jittering, global_transfo1_extra, self.normalize])
93
+ self.global_transfo2 = transforms.Compose([color_jittering, global_transfo2_extra, self.normalize])
94
+ self.local_transfo = transforms.Compose([color_jittering, local_transfo_extra, self.normalize])
95
+
96
+ def __call__(self, image):
97
+ output = {}
98
+
99
+ # global crops:
100
+ im1_base = self.geometric_augmentation_global(image)
101
+ global_crop_1 = self.global_transfo1(im1_base)
102
+
103
+ im2_base = self.geometric_augmentation_global(image)
104
+ global_crop_2 = self.global_transfo2(im2_base)
105
+
106
+ output["global_crops"] = [global_crop_1, global_crop_2]
107
+
108
+ # global crops for teacher:
109
+ output["global_crops_teacher"] = [global_crop_1, global_crop_2]
110
+
111
+ # local crops:
112
+ local_crops = [
113
+ self.local_transfo(self.geometric_augmentation_local(image)) for _ in range(self.local_crops_number)
114
+ ]
115
+ output["local_crops"] = local_crops
116
+ output["offsets"] = ()
117
+
118
+ return output
static/facebookresearch_dinov2_main/dinov2/data/collate.py ADDED
@@ -0,0 +1,49 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ #
3
+ # This source code is licensed under the Apache License, Version 2.0
4
+ # found in the LICENSE file in the root directory of this source tree.
5
+
6
+ import torch
7
+ import random
8
+
9
+
10
+ def collate_data_and_cast(samples_list, mask_ratio_tuple, mask_probability, dtype, n_tokens=None, mask_generator=None):
11
+ # dtype = torch.half # TODO: Remove
12
+
13
+ n_global_crops = len(samples_list[0][0]["global_crops"])
14
+ n_local_crops = len(samples_list[0][0]["local_crops"])
15
+
16
+ collated_global_crops = torch.stack([s[0]["global_crops"][i] for i in range(n_global_crops) for s in samples_list])
17
+
18
+ collated_local_crops = torch.stack([s[0]["local_crops"][i] for i in range(n_local_crops) for s in samples_list])
19
+
20
+ B = len(collated_global_crops)
21
+ N = n_tokens
22
+ n_samples_masked = int(B * mask_probability)
23
+ probs = torch.linspace(*mask_ratio_tuple, n_samples_masked + 1)
24
+ upperbound = 0
25
+ masks_list = []
26
+ for i in range(0, n_samples_masked):
27
+ prob_min = probs[i]
28
+ prob_max = probs[i + 1]
29
+ masks_list.append(torch.BoolTensor(mask_generator(int(N * random.uniform(prob_min, prob_max)))))
30
+ upperbound += int(N * prob_max)
31
+ for i in range(n_samples_masked, B):
32
+ masks_list.append(torch.BoolTensor(mask_generator(0)))
33
+
34
+ random.shuffle(masks_list)
35
+
36
+ collated_masks = torch.stack(masks_list).flatten(1)
37
+ mask_indices_list = collated_masks.flatten().nonzero().flatten()
38
+
39
+ masks_weight = (1 / collated_masks.sum(-1).clamp(min=1.0)).unsqueeze(-1).expand_as(collated_masks)[collated_masks]
40
+
41
+ return {
42
+ "collated_global_crops": collated_global_crops.to(dtype),
43
+ "collated_local_crops": collated_local_crops.to(dtype),
44
+ "collated_masks": collated_masks,
45
+ "mask_indices_list": mask_indices_list,
46
+ "masks_weight": masks_weight,
47
+ "upperbound": upperbound,
48
+ "n_masked_patches": torch.full((1,), fill_value=mask_indices_list.shape[0], dtype=torch.long),
49
+ }
static/facebookresearch_dinov2_main/dinov2/data/datasets/__init__.py ADDED
@@ -0,0 +1,7 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ #
3
+ # This source code is licensed under the Apache License, Version 2.0
4
+ # found in the LICENSE file in the root directory of this source tree.
5
+
6
+ from .image_net import ImageNet
7
+ from .image_net_22k import ImageNet22k
static/facebookresearch_dinov2_main/dinov2/data/datasets/decoders.py ADDED
@@ -0,0 +1,31 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ #
3
+ # This source code is licensed under the Apache License, Version 2.0
4
+ # found in the LICENSE file in the root directory of this source tree.
5
+
6
+ from io import BytesIO
7
+ from typing import Any
8
+
9
+ from PIL import Image
10
+
11
+
12
+ class Decoder:
13
+ def decode(self) -> Any:
14
+ raise NotImplementedError
15
+
16
+
17
+ class ImageDataDecoder(Decoder):
18
+ def __init__(self, image_data: bytes) -> None:
19
+ self._image_data = image_data
20
+
21
+ def decode(self) -> Image:
22
+ f = BytesIO(self._image_data)
23
+ return Image.open(f).convert(mode="RGB")
24
+
25
+
26
+ class TargetDecoder(Decoder):
27
+ def __init__(self, target: Any):
28
+ self._target = target
29
+
30
+ def decode(self) -> Any:
31
+ return self._target
static/facebookresearch_dinov2_main/dinov2/data/datasets/extended.py ADDED
@@ -0,0 +1,38 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ #
3
+ # This source code is licensed under the Apache License, Version 2.0
4
+ # found in the LICENSE file in the root directory of this source tree.
5
+
6
+ from typing import Any, Tuple
7
+
8
+ from torchvision.datasets import VisionDataset
9
+
10
+ from .decoders import TargetDecoder, ImageDataDecoder
11
+
12
+
13
+ class ExtendedVisionDataset(VisionDataset):
14
+ def __init__(self, *args, **kwargs) -> None:
15
+ super().__init__(*args, **kwargs) # type: ignore
16
+
17
+ def get_image_data(self, index: int) -> bytes:
18
+ raise NotImplementedError
19
+
20
+ def get_target(self, index: int) -> Any:
21
+ raise NotImplementedError
22
+
23
+ def __getitem__(self, index: int) -> Tuple[Any, Any]:
24
+ try:
25
+ image_data = self.get_image_data(index)
26
+ image = ImageDataDecoder(image_data).decode()
27
+ except Exception as e:
28
+ raise RuntimeError(f"can not read image for sample {index}") from e
29
+ target = self.get_target(index)
30
+ target = TargetDecoder(target).decode()
31
+
32
+ if self.transforms is not None:
33
+ image, target = self.transforms(image, target)
34
+
35
+ return image, target
36
+
37
+ def __len__(self) -> int:
38
+ raise NotImplementedError
static/facebookresearch_dinov2_main/dinov2/data/datasets/image_net.py ADDED
@@ -0,0 +1,290 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ #
3
+ # This source code is licensed under the Apache License, Version 2.0
4
+ # found in the LICENSE file in the root directory of this source tree.
5
+
6
+ import csv
7
+ from enum import Enum
8
+ import logging
9
+ import os
10
+ from typing import Callable, List, Optional, Tuple, Union
11
+
12
+ import numpy as np
13
+
14
+ from .extended import ExtendedVisionDataset
15
+
16
+
17
+ logger = logging.getLogger("dinov2")
18
+ _Target = int
19
+
20
+
21
+ class _Split(Enum):
22
+ TRAIN = "train"
23
+ VAL = "val"
24
+ TEST = "test" # NOTE: torchvision does not support the test split
25
+
26
+ @property
27
+ def length(self) -> int:
28
+ split_lengths = {
29
+ _Split.TRAIN: 1_281_167,
30
+ _Split.VAL: 50_000,
31
+ _Split.TEST: 100_000,
32
+ }
33
+ return split_lengths[self]
34
+
35
+ def get_dirname(self, class_id: Optional[str] = None) -> str:
36
+ return self.value if class_id is None else os.path.join(self.value, class_id)
37
+
38
+ def get_image_relpath(self, actual_index: int, class_id: Optional[str] = None) -> str:
39
+ dirname = self.get_dirname(class_id)
40
+ if self == _Split.TRAIN:
41
+ basename = f"{class_id}_{actual_index}"
42
+ else: # self in (_Split.VAL, _Split.TEST):
43
+ basename = f"ILSVRC2012_{self.value}_{actual_index:08d}"
44
+ return os.path.join(dirname, basename + ".JPEG")
45
+
46
+ def parse_image_relpath(self, image_relpath: str) -> Tuple[str, int]:
47
+ assert self != _Split.TEST
48
+ dirname, filename = os.path.split(image_relpath)
49
+ class_id = os.path.split(dirname)[-1]
50
+ basename, _ = os.path.splitext(filename)
51
+ actual_index = int(basename.split("_")[-1])
52
+ return class_id, actual_index
53
+
54
+
55
+ class ImageNet(ExtendedVisionDataset):
56
+ Target = Union[_Target]
57
+ Split = Union[_Split]
58
+
59
+ def __init__(
60
+ self,
61
+ *,
62
+ split: "ImageNet.Split",
63
+ root: str,
64
+ extra: str,
65
+ transforms: Optional[Callable] = None,
66
+ transform: Optional[Callable] = None,
67
+ target_transform: Optional[Callable] = None,
68
+ ) -> None:
69
+ super().__init__(root, transforms, transform, target_transform)
70
+ self._extra_root = extra
71
+ self._split = split
72
+
73
+ self._entries = None
74
+ self._class_ids = None
75
+ self._class_names = None
76
+
77
+ @property
78
+ def split(self) -> "ImageNet.Split":
79
+ return self._split
80
+
81
+ def _get_extra_full_path(self, extra_path: str) -> str:
82
+ return os.path.join(self._extra_root, extra_path)
83
+
84
+ def _load_extra(self, extra_path: str) -> np.ndarray:
85
+ extra_full_path = self._get_extra_full_path(extra_path)
86
+ return np.load(extra_full_path, mmap_mode="r")
87
+
88
+ def _save_extra(self, extra_array: np.ndarray, extra_path: str) -> None:
89
+ extra_full_path = self._get_extra_full_path(extra_path)
90
+ os.makedirs(self._extra_root, exist_ok=True)
91
+ np.save(extra_full_path, extra_array)
92
+
93
+ @property
94
+ def _entries_path(self) -> str:
95
+ return f"entries-{self._split.value.upper()}.npy"
96
+
97
+ @property
98
+ def _class_ids_path(self) -> str:
99
+ return f"class-ids-{self._split.value.upper()}.npy"
100
+
101
+ @property
102
+ def _class_names_path(self) -> str:
103
+ return f"class-names-{self._split.value.upper()}.npy"
104
+
105
+ def _get_entries(self) -> np.ndarray:
106
+ if self._entries is None:
107
+ self._entries = self._load_extra(self._entries_path)
108
+ assert self._entries is not None
109
+ return self._entries
110
+
111
+ def _get_class_ids(self) -> np.ndarray:
112
+ if self._split == _Split.TEST:
113
+ assert False, "Class IDs are not available in TEST split"
114
+ if self._class_ids is None:
115
+ self._class_ids = self._load_extra(self._class_ids_path)
116
+ assert self._class_ids is not None
117
+ return self._class_ids
118
+
119
+ def _get_class_names(self) -> np.ndarray:
120
+ if self._split == _Split.TEST:
121
+ assert False, "Class names are not available in TEST split"
122
+ if self._class_names is None:
123
+ self._class_names = self._load_extra(self._class_names_path)
124
+ assert self._class_names is not None
125
+ return self._class_names
126
+
127
+ def find_class_id(self, class_index: int) -> str:
128
+ class_ids = self._get_class_ids()
129
+ return str(class_ids[class_index])
130
+
131
+ def find_class_name(self, class_index: int) -> str:
132
+ class_names = self._get_class_names()
133
+ return str(class_names[class_index])
134
+
135
+ def get_image_data(self, index: int) -> bytes:
136
+ entries = self._get_entries()
137
+ actual_index = entries[index]["actual_index"]
138
+
139
+ class_id = self.get_class_id(index)
140
+
141
+ image_relpath = self.split.get_image_relpath(actual_index, class_id)
142
+ image_full_path = os.path.join(self.root, image_relpath)
143
+ with open(image_full_path, mode="rb") as f:
144
+ image_data = f.read()
145
+ return image_data
146
+
147
+ def get_target(self, index: int) -> Optional[Target]:
148
+ entries = self._get_entries()
149
+ class_index = entries[index]["class_index"]
150
+ return None if self.split == _Split.TEST else int(class_index)
151
+
152
+ def get_targets(self) -> Optional[np.ndarray]:
153
+ entries = self._get_entries()
154
+ return None if self.split == _Split.TEST else entries["class_index"]
155
+
156
+ def get_class_id(self, index: int) -> Optional[str]:
157
+ entries = self._get_entries()
158
+ class_id = entries[index]["class_id"]
159
+ return None if self.split == _Split.TEST else str(class_id)
160
+
161
+ def get_class_name(self, index: int) -> Optional[str]:
162
+ entries = self._get_entries()
163
+ class_name = entries[index]["class_name"]
164
+ return None if self.split == _Split.TEST else str(class_name)
165
+
166
+ def __len__(self) -> int:
167
+ entries = self._get_entries()
168
+ assert len(entries) == self.split.length
169
+ return len(entries)
170
+
171
+ def _load_labels(self, labels_path: str) -> List[Tuple[str, str]]:
172
+ labels_full_path = os.path.join(self.root, labels_path)
173
+ labels = []
174
+
175
+ try:
176
+ with open(labels_full_path, "r") as f:
177
+ reader = csv.reader(f)
178
+                 for row in reader:
+                     class_id, class_name = row
+                     labels.append((class_id, class_name))
+         except OSError as e:
+             raise RuntimeError(f'can not read labels file "{labels_full_path}"') from e
+
+         return labels
+
+     def _dump_entries(self) -> None:
+         split = self.split
+         if split == ImageNet.Split.TEST:
+             dataset = None
+             sample_count = split.length
+             max_class_id_length, max_class_name_length = 0, 0
+         else:
+             labels_path = "labels.txt"
+             logger.info(f'loading labels from "{labels_path}"')
+             labels = self._load_labels(labels_path)
+
+             # NOTE: Using torchvision ImageFolder for consistency
+             from torchvision.datasets import ImageFolder
+
+             dataset_root = os.path.join(self.root, split.get_dirname())
+             dataset = ImageFolder(dataset_root)
+             sample_count = len(dataset)
+             max_class_id_length, max_class_name_length = -1, -1
+             for sample in dataset.samples:
+                 _, class_index = sample
+                 class_id, class_name = labels[class_index]
+                 max_class_id_length = max(len(class_id), max_class_id_length)
+                 max_class_name_length = max(len(class_name), max_class_name_length)
+
+         dtype = np.dtype(
+             [
+                 ("actual_index", "<u4"),
+                 ("class_index", "<u4"),
+                 ("class_id", f"U{max_class_id_length}"),
+                 ("class_name", f"U{max_class_name_length}"),
+             ]
+         )
+         entries_array = np.empty(sample_count, dtype=dtype)
+
+         if split == ImageNet.Split.TEST:
+             old_percent = -1
+             for index in range(sample_count):
+                 percent = 100 * (index + 1) // sample_count
+                 if percent > old_percent:
+                     logger.info(f"creating entries: {percent}%")
+                     old_percent = percent
+
+                 actual_index = index + 1
+                 class_index = np.uint32(-1)
+                 class_id, class_name = "", ""
+                 entries_array[index] = (actual_index, class_index, class_id, class_name)
+         else:
+             class_names = {class_id: class_name for class_id, class_name in labels}
+
+             assert dataset
+             old_percent = -1
+             for index in range(sample_count):
+                 percent = 100 * (index + 1) // sample_count
+                 if percent > old_percent:
+                     logger.info(f"creating entries: {percent}%")
+                     old_percent = percent
+
+                 image_full_path, class_index = dataset.samples[index]
+                 image_relpath = os.path.relpath(image_full_path, self.root)
+                 class_id, actual_index = split.parse_image_relpath(image_relpath)
+                 class_name = class_names[class_id]
+                 entries_array[index] = (actual_index, class_index, class_id, class_name)
+
+         logger.info(f'saving entries to "{self._entries_path}"')
+         self._save_extra(entries_array, self._entries_path)
+
+     def _dump_class_ids_and_names(self) -> None:
+         split = self.split
+         if split == ImageNet.Split.TEST:
+             return
+
+         entries_array = self._load_extra(self._entries_path)
+
+         max_class_id_length, max_class_name_length, max_class_index = -1, -1, -1
+         for entry in entries_array:
+             class_index, class_id, class_name = (
+                 entry["class_index"],
+                 entry["class_id"],
+                 entry["class_name"],
+             )
+             max_class_index = max(int(class_index), max_class_index)
+             max_class_id_length = max(len(str(class_id)), max_class_id_length)
+             max_class_name_length = max(len(str(class_name)), max_class_name_length)
+
+         class_count = max_class_index + 1
+         class_ids_array = np.empty(class_count, dtype=f"U{max_class_id_length}")
+         class_names_array = np.empty(class_count, dtype=f"U{max_class_name_length}")
+         for entry in entries_array:
+             class_index, class_id, class_name = (
+                 entry["class_index"],
+                 entry["class_id"],
+                 entry["class_name"],
+             )
+             class_ids_array[class_index] = class_id
+             class_names_array[class_index] = class_name
+
+         logger.info(f'saving class IDs to "{self._class_ids_path}"')
+         self._save_extra(class_ids_array, self._class_ids_path)
+
+         logger.info(f'saving class names to "{self._class_names_path}"')
+         self._save_extra(class_names_array, self._class_names_path)
+
+     def dump_extra(self) -> None:
+         self._dump_entries()
+         self._dump_class_ids_and_names()
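The `dump_extra` method above is the one-off preprocessing entry point: `_dump_entries` walks the split with torchvision's `ImageFolder` and writes one structured NumPy record per image, and `_dump_class_ids_and_names` derives the per-class lookup arrays from those records. A minimal sketch of how that step might be run once per split follows; the import path, the constructor keywords, and the `<ROOT>`/`<EXTRA>` paths are assumptions for illustration, not part of this commit:

# Hypothetical preprocessing script (placeholder paths; assumes the dataset
# accepts ImageNet(split=..., root=..., extra=...) as suggested by this file).
from dinov2.data.datasets import ImageNet

for split in ImageNet.Split:
    dataset = ImageNet(split=split, root="<ROOT>", extra="<EXTRA>")
    dataset.dump_extra()  # writes the entries, class-ID and class-name .npy arrays

Running this once lets training jobs memory-map the precomputed arrays instead of re-scanning the image tree at every startup.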
static/facebookresearch_dinov2_main/dinov2/data/datasets/image_net_22k.py ADDED
@@ -0,0 +1,302 @@
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
+ #
+ # This source code is licensed under the Apache License, Version 2.0
+ # found in the LICENSE file in the root directory of this source tree.
+
+ from dataclasses import dataclass
+ from enum import Enum
+ from functools import lru_cache
+ from gzip import GzipFile
+ from io import BytesIO
+ from mmap import ACCESS_READ, mmap
+ import os
+ from typing import Any, Callable, List, Optional, Set, Tuple
+ import warnings
+
+ import numpy as np
+
+ from .extended import ExtendedVisionDataset
+
+
+ _Labels = int
+
+ _DEFAULT_MMAP_CACHE_SIZE = 16  # Warning: This can exhaust file descriptors
+
+
+ @dataclass
+ class _ClassEntry:
+     block_offset: int
+     maybe_filename: Optional[str] = None
+
+
+ @dataclass
+ class _Entry:
+     class_index: int  # noqa: E701
+     start_offset: int
+     end_offset: int
+     filename: str
+
+
+ class _Split(Enum):
+     TRAIN = "train"
+     VAL = "val"
+
+     @property
+     def length(self) -> int:
+         return {
+             _Split.TRAIN: 11_797_647,
+             _Split.VAL: 561_050,
+         }[self]
+
+     def entries_path(self):
+         return f"imagenet21kp_{self.value}.txt"
+
+
+ def _get_tarball_path(class_id: str) -> str:
+     return f"{class_id}.tar"
+
+
+ def _make_mmap_tarball(tarballs_root: str, mmap_cache_size: int):
+     @lru_cache(maxsize=mmap_cache_size)
+     def _mmap_tarball(class_id: str) -> mmap:
+         tarball_path = _get_tarball_path(class_id)
+         tarball_full_path = os.path.join(tarballs_root, tarball_path)
+         with open(tarball_full_path) as f:
+             return mmap(fileno=f.fileno(), length=0, access=ACCESS_READ)
+
+     return _mmap_tarball
+
+
+ class ImageNet22k(ExtendedVisionDataset):
+     _GZIPPED_INDICES: Set[int] = {
+         841_545,
+         1_304_131,
+         2_437_921,
+         2_672_079,
+         2_795_676,
+         2_969_786,
+         6_902_965,
+         6_903_550,
+         6_903_628,
+         7_432_557,
+         7_432_589,
+         7_813_809,
+         8_329_633,
+         10_296_990,
+         10_417_652,
+         10_492_265,
+         10_598_078,
+         10_782_398,
+         10_902_612,
+         11_203_736,
+         11_342_890,
+         11_397_596,
+         11_589_762,
+         11_705_103,
+         12_936_875,
+         13_289_782,
+     }
+     Labels = _Labels
+
+     def __init__(
+         self,
+         *,
+         root: str,
+         extra: str,
+         transforms: Optional[Callable] = None,
+         transform: Optional[Callable] = None,
+         target_transform: Optional[Callable] = None,
+         mmap_cache_size: int = _DEFAULT_MMAP_CACHE_SIZE,
+     ) -> None:
+         super().__init__(root, transforms, transform, target_transform)
+         self._extra_root = extra
+
+         entries_path = self._get_entries_path(root)
+         self._entries = self._load_extra(entries_path)
+
+         class_ids_path = self._get_class_ids_path(root)
+         self._class_ids = self._load_extra(class_ids_path)
+
+         self._gzipped_indices = ImageNet22k._GZIPPED_INDICES
+         self._mmap_tarball = _make_mmap_tarball(self._tarballs_root, mmap_cache_size)
+
+     def _get_entries_path(self, root: Optional[str] = None) -> str:
+         return "entries.npy"
+
+     def _get_class_ids_path(self, root: Optional[str] = None) -> str:
+         return "class-ids.npy"
+
+     def _find_class_ids(self, path: str) -> List[str]:
+         class_ids = []
+
+         with os.scandir(path) as entries:
+             for entry in entries:
+                 root, ext = os.path.splitext(entry.name)
+                 if ext != ".tar":
+                     continue
+                 class_ids.append(root)
+
+         return sorted(class_ids)
+
+     def _load_entries_class_ids(self, root: Optional[str] = None) -> Tuple[List[_Entry], List[str]]:
+         root = self.get_root(root)
+         entries: List[_Entry] = []
+         class_ids = self._find_class_ids(root)
+
+         for class_index, class_id in enumerate(class_ids):
+             path = os.path.join(root, "blocks", f"{class_id}.log")
+             class_entries = []
+
+             try:
+                 with open(path) as f:
+                     for line in f:
+                         line = line.rstrip()
+                         block, filename = line.split(":")
+                         block_offset = int(block[6:])
+                         filename = filename[1:]
+
+                         maybe_filename = None
+                         if filename != "** Block of NULs **":
+                             maybe_filename = filename
+                             _, ext = os.path.splitext(filename)
+                             # assert ext == ".JPEG"
+
+                         class_entry = _ClassEntry(block_offset, maybe_filename)
+                         class_entries.append(class_entry)
+             except OSError as e:
+                 raise RuntimeError(f'can not read blocks file "{path}"') from e
+
+             assert class_entries[-1].maybe_filename is None
+
+             for class_entry1, class_entry2 in zip(class_entries, class_entries[1:]):
+                 assert class_entry1.block_offset <= class_entry2.block_offset
+                 start_offset = 512 * class_entry1.block_offset
+                 end_offset = 512 * class_entry2.block_offset
+                 assert class_entry1.maybe_filename is not None
+                 filename = class_entry1.maybe_filename
+                 entry = _Entry(class_index, start_offset, end_offset, filename)
+                 # Skip invalid image files (PIL throws UnidentifiedImageError)
+                 if filename == "n06470073_47249.JPEG":
+                     continue
+                 entries.append(entry)
+
+         return entries, class_ids
+
+     def _load_extra(self, extra_path: str) -> np.ndarray:
+         extra_root = self._extra_root
+         extra_full_path = os.path.join(extra_root, extra_path)
+         return np.load(extra_full_path, mmap_mode="r")
+
+     def _save_extra(self, extra_array: np.ndarray, extra_path: str) -> None:
+         extra_root = self._extra_root
+         extra_full_path = os.path.join(extra_root, extra_path)
+         os.makedirs(extra_root, exist_ok=True)
+         np.save(extra_full_path, extra_array)
+
+     @property
+     def _tarballs_root(self) -> str:
+         return self.root
+
+     def find_class_id(self, class_index: int) -> str:
+         return str(self._class_ids[class_index])
+
+     def get_image_data(self, index: int) -> bytes:
+         entry = self._entries[index]
+         class_id = entry["class_id"]
+         class_mmap = self._mmap_tarball(class_id)
+
+         start_offset, end_offset = entry["start_offset"], entry["end_offset"]
+         try:
+             mapped_data = class_mmap[start_offset:end_offset]
+             data = mapped_data[512:]  # Skip entry header block
+
+             if len(data) >= 2 and tuple(data[:2]) == (0x1F, 0x8B):
+                 assert index in self._gzipped_indices, f"unexpected gzip header for sample {index}"
+                 with GzipFile(fileobj=BytesIO(data)) as g:
+                     data = g.read()
+         except Exception as e:
+             raise RuntimeError(f"can not retrieve image data for sample {index} " f'from "{class_id}" tarball') from e
+
+         return data
+
+     def get_target(self, index: int) -> Any:
+         return int(self._entries[index]["class_index"])
+
+     def get_targets(self) -> np.ndarray:
+         return self._entries["class_index"]
+
+     def get_class_id(self, index: int) -> str:
+         return str(self._entries[index]["class_id"])
+
+     def get_class_ids(self) -> np.ndarray:
+         return self._entries["class_id"]
+
+     def __getitem__(self, index: int) -> Tuple[Any, Any]:
+         with warnings.catch_warnings():
+             warnings.simplefilter("ignore")
+             return super().__getitem__(index)
+
+     def __len__(self) -> int:
+         return len(self._entries)
+
+     def _dump_entries(self, *args, **kwargs) -> None:
+         entries, class_ids = self._load_entries_class_ids(*args, **kwargs)
+
+         max_class_id_length, max_filename_length, max_class_index = -1, -1, -1
+         for entry in entries:
+             class_id = class_ids[entry.class_index]
+             max_class_index = max(entry.class_index, max_class_index)
+             max_class_id_length = max(len(class_id), max_class_id_length)
+             max_filename_length = max(len(entry.filename), max_filename_length)
+
+         dtype = np.dtype(
+             [
+                 ("class_index", "<u4"),
+                 ("class_id", f"U{max_class_id_length}"),
+                 ("start_offset", "<u4"),
+                 ("end_offset", "<u4"),
+                 ("filename", f"U{max_filename_length}"),
+             ]
+         )
+         sample_count = len(entries)
+         entries_array = np.empty(sample_count, dtype=dtype)
+         for i, entry in enumerate(entries):
+             class_index = entry.class_index
+             class_id = class_ids[class_index]
+             start_offset = entry.start_offset
+             end_offset = entry.end_offset
+             filename = entry.filename
+             entries_array[i] = (
+                 class_index,
+                 class_id,
+                 start_offset,
+                 end_offset,
+                 filename,
+             )
+
+         entries_path = self._get_entries_path(*args, **kwargs)
+         self._save_extra(entries_array, entries_path)
+
+     def _dump_class_ids(self, *args, **kwargs) -> None:
+         entries_path = self._get_entries_path(*args, **kwargs)
+         entries_array = self._load_extra(entries_path)
+
+         max_class_id_length, max_class_index = -1, -1
+         for entry in entries_array:
+             class_index, class_id = entry["class_index"], entry["class_id"]
+             max_class_index = max(int(class_index), max_class_index)
+             max_class_id_length = max(len(str(class_id)), max_class_id_length)
+
+         class_ids_array = np.empty(max_class_index + 1, dtype=f"U{max_class_id_length}")
+         for entry in entries_array:
+             class_index, class_id = entry["class_index"], entry["class_id"]
+             class_ids_array[class_index] = class_id
+         class_ids_path = self._get_class_ids_path(*args, **kwargs)
+         self._save_extra(class_ids_array, class_ids_path)
+
+     def _dump_extra(self, *args, **kwargs) -> None:
+         self._dump_entries(*args, **kwargs)
+         self._dump_class_ids(*args, **kwargs)
+
+     def dump_extra(self, root: Optional[str] = None) -> None:
+         return self._dump_extra(root)
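For completeness, a hedged usage sketch of `ImageNet22k`: the constructor memory-maps the precomputed `entries.npy` and `class-ids.npy` from the `extra` directory, and `get_image_data` slices the per-class tarball mmap between the stored byte offsets, drops the leading 512-byte tar header block, and gunzips the handful of known gzipped members. The import path, the on-disk layout, and the assumption that the `ExtendedVisionDataset` base class decodes the returned bytes in `__getitem__` are not shown in this commit:

# Hypothetical usage (placeholder paths; assumes the datasets package re-exports ImageNet22k).
from dinov2.data.datasets import ImageNet22k

dataset = ImageNet22k(root="<ROOT>/imagenet22k", extra="<EXTRA>/imagenet22k")
print(len(dataset))                                     # rows in entries.npy
print(dataset.get_class_id(0), dataset.get_target(0))   # class id string, integer label
image, target = dataset[0]                              # raw bytes from the tarball mmap, decoded by the base class

The `lru_cache` around `_mmap_tarball` keeps one open memory map per class tarball, which is why `_DEFAULT_MMAP_CACHE_SIZE` carries the file-descriptor warning above.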