Add model
- README.md +120 -0
- config.json +34 -0
- model.safetensors +3 -0
- pytorch_model.bin +3 -0
README.md
ADDED
@@ -0,0 +1,120 @@
---
tags:
- timm
- image-classification
library_name: timm
license: apache-2.0
---
# Model card for vit_base_patch14_reg4_dinov2.lvd142m

A Vision Transformer (ViT) image feature model with registers. Pretrained on LVD-142M using the self-supervised DINOv2 method.


## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 86.6
  - GMACs: 117.5
  - Activations (M): 115.0
  - Image size: 518 x 518 (see the token-count sketch below)
- **Papers:**
  - Vision Transformers Need Registers: https://arxiv.org/abs/2309.16588
  - DINOv2: Learning Robust Visual Features without Supervision: https://arxiv.org/abs/2304.07193
  - An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale: https://arxiv.org/abs/2010.11929v2
- **Original:** https://github.com/facebookresearch/dinov2
- **Pretrain Dataset:** LVD-142M

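With 14 x 14 pixel patches at the 518 x 518 input size, the backbone sees a 37 x 37 patch grid, and the `reg4` in the model name adds 4 register tokens alongside the usual class token. A quick arithmetic check of the resulting sequence length (it matches the (1, 1374, 768) `forward_features` shape shown later):

```python
image_size, patch_size = 518, 14   # from the model stats above
num_registers = 4                  # "reg4" in the model name

grid = image_size // patch_size            # 37 patches per side
num_patches = grid * grid                  # 1369 patch tokens
seq_len = 1 + num_registers + num_patches  # class token + registers + patches
print(grid, num_patches, seq_len)          # 37 1369 1374
```
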
## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import timm
import torch

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('vit_base_patch14_reg4_dinov2.lvd142m', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

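Note that this checkpoint ships without a classifier head (`"num_classes": 0` in config.json), so the snippet above follows timm's standard card template; to get real class predictions you would first attach and fine-tune a head, e.g. by passing a non-zero `num_classes` to `timm.create_model`.
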
### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'vit_base_patch14_reg4_dinov2.lvd142m',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 1374, 768) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

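The 1374 unpooled tokens break down as 1 class token, 4 register tokens, and a 37 x 37 grid of patch tokens. Continuing from the embeddings example above (`model`, `transforms`, `img` as defined there), a minimal sketch of slicing them apart, assuming timm's usual prefix-token order of class token first, then registers, then patches:

```python
feats = model.forward_features(transforms(img).unsqueeze(0))  # (1, 1374, 768)

num_prefix = 1 + 4                        # class token + 4 register tokens
cls_token = feats[:, 0]                   # (1, 768)
register_tokens = feats[:, 1:num_prefix]  # (1, 4, 768)
patch_tokens = feats[:, num_prefix:]      # (1, 1369, 768)

# recover the 37 x 37 spatial layout of the patch tokens
patch_grid = patch_tokens.reshape(1, 37, 37, -1)
```
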
## Model Comparison
Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results).

## Citation
```bibtex
@article{darcet2023vision,
  title={Vision Transformers Need Registers},
  author={Darcet, Timoth{\'e}e and Oquab, Maxime and Mairal, Julien and Bojanowski, Piotr},
  journal={arXiv preprint arXiv:2309.16588},
  year={2023}
}
```
```bibtex
@misc{oquab2023dinov2,
  title={DINOv2: Learning Robust Visual Features without Supervision},
  author={Oquab, Maxime and Darcet, Timothée and Moutakanni, Theo and Vo, Huy V. and Szafraniec, Marc and Khalidov, Vasil and Fernandez, Pierre and Haziza, Daniel and Massa, Francisco and El-Nouby, Alaaeldin and Howes, Russell and Huang, Po-Yao and Xu, Hu and Sharma, Vasu and Li, Shang-Wen and Galuba, Wojciech and Rabbat, Mike and Assran, Mido and Ballas, Nicolas and Synnaeve, Gabriel and Misra, Ishan and Jegou, Herve and Mairal, Julien and Labatut, Patrick and Joulin, Armand and Bojanowski, Piotr},
  journal={arXiv:2304.07193},
  year={2023}
}
```
```bibtex
@article{dosovitskiy2020vit,
  title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale},
  author={Dosovitskiy, Alexey and Beyer, Lucas and Kolesnikov, Alexander and Weissenborn, Dirk and Zhai, Xiaohua and Unterthiner, Thomas and Dehghani, Mostafa and Minderer, Matthias and Heigold, Georg and Gelly, Sylvain and Uszkoreit, Jakob and Houlsby, Neil},
  journal={ICLR},
  year={2021}
}
```
```bibtex
@misc{rw2019timm,
  author = {Ross Wightman},
  title = {PyTorch Image Models},
  year = {2019},
  publisher = {GitHub},
  journal = {GitHub repository},
  doi = {10.5281/zenodo.4414861},
  howpublished = {\url{https://github.com/huggingface/pytorch-image-models}}
}
```
config.json
ADDED
@@ -0,0 +1,34 @@
{
    "architecture": "vit_base_patch14_reg4_dinov2",
    "num_classes": 0,
    "num_features": 768,
    "global_pool": "token",
    "pretrained_cfg": {
        "tag": "lvd142m",
        "custom_load": false,
        "input_size": [
            3,
            518,
            518
        ],
        "fixed_input_size": true,
        "interpolation": "bicubic",
        "crop_pct": 1.0,
        "crop_mode": "center",
        "mean": [
            0.485,
            0.456,
            0.406
        ],
        "std": [
            0.229,
            0.224,
            0.225
        ],
        "num_classes": 0,
        "pool_size": null,
        "first_conv": "patch_embed.proj",
        "classifier": "head",
        "license": "apache-2.0"
    }
}
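For reference, a minimal sketch of the eval-time preprocessing these `pretrained_cfg` fields describe (bicubic resize, `crop_pct` 1.0 center crop, ImageNet mean/std), written with plain torchvision transforms purely as an illustration; `timm.data.create_transform` shown in the README is the supported path:

```python
from torchvision import transforms
from torchvision.transforms import InterpolationMode

preprocess = transforms.Compose([
    # crop_pct is 1.0, so resize the short side straight to 518 ...
    transforms.Resize(518, interpolation=InterpolationMode.BICUBIC),
    # ... then take the full 518 x 518 center crop
    transforms.CenterCrop(518),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
```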
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c24ecfb4a1d8ca79193f6b9efcffc461872a09ddac43a0931357e4802931a006
size 346344168
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b627fcb1ba108b9a50a0ada611d423d88f0cad650f985de972604a3f6f00c597
size 346391878