Ubuntu committed
Commit 7c8280a
1 Parent(s): 38c7682

add bin files

Files changed:
- pytorch_model-00001-of-00008.bin +3 -0
- pytorch_model-00002-of-00008.bin +3 -0
- pytorch_model-00003-of-00008.bin +3 -0
- pytorch_model-00004-of-00008.bin +3 -0
- pytorch_model-00005-of-00008.bin +3 -0
- pytorch_model-00006-of-00008.bin +3 -0
- pytorch_model-00007-of-00008.bin +3 -0
- pytorch_model-00008-of-00008.bin +3 -0
- tokenizer.model +3 -0
- vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/.gitattributes +32 -0
- vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/README.md +165 -0
- vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/config.json +23 -0
- vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/merges.txt +0 -0
- vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/open_clip_config.json +31 -0
- vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/open_clip_pytorch_model.bin +3 -0
- vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/preprocessor_config.json +19 -0
- vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/pytorch_model.bin +3 -0
- vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/special_tokens_map.json +1 -0
- vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/tokenizer.json +0 -0
- vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/tokenizer_config.json +34 -0
- vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/vocab.json +0 -0
pytorch_model-00001-of-00008.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1ecbabcc9a0cae173a99b6fd776e020c1b8e55a5cfc52633c760ae840c3780e
+size 9975358895
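Each of the shards above is stored through Git LFS: the committed file is a three-line pointer giving the spec version, the payload's `oid sha256:` digest, and its `size` in bytes, while the actual weights live in LFS storage. Below is a minimal sketch, using only the standard library, of checking a downloaded shard against its pointer; the file names and the `verify_lfs_object` helper are hypothetical.

```python
import hashlib
import os

def verify_lfs_object(pointer_path: str, payload_path: str) -> bool:
    """Check a downloaded payload against the oid/size recorded in an LFS pointer."""
    fields = {}
    with open(pointer_path, "r", encoding="utf-8") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            fields[key] = value

    expected_oid = fields["oid"].split(":", 1)[1]   # "sha256:<hex>" -> "<hex>"
    expected_size = int(fields["size"])

    # Stream the payload so multi-gigabyte shards never need to fit in memory.
    digest = hashlib.sha256()
    with open(payload_path, "rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):
            digest.update(chunk)

    return (digest.hexdigest() == expected_oid
            and os.path.getsize(payload_path) == expected_size)

# Hypothetical usage: pointer text kept aside, payload fetched separately.
# verify_lfs_object("pytorch_model-00001-of-00008.bin.pointer",
#                   "pytorch_model-00001-of-00008.bin")
```

The same check applies to every pointer file added in this commit, including `tokenizer.model` and the files under `vit/`.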
pytorch_model-00002-of-00008.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38ed55b7a8bcff41fd65a21c447c5458fc87a9e8278a7f782e6090d22e917a1d
+size 9909328019
pytorch_model-00003-of-00008.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f8d0b644b5ba6e2b95069a0a562a07184c08c0cb29653196523002d57c69ffb
+size 9747818583
pytorch_model-00004-of-00008.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b7fad804afb6c70473c4a513f6af83d23cc1a1efa4cbb8b8c82601567b274ca
+size 9747847907
pytorch_model-00005-of-00008.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:138249bb4af01081f07cbabed88fd1008d774580cd97b283983e92500b4bc484
+size 9747847967
pytorch_model-00006-of-00008.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9d8e611083ae49e13a93b49cf6c61bd37cb6af60a99a1224129b3619716b110
+size 9938688835
pytorch_model-00007-of-00008.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa77e118e13cee579708af229fd970964e52e47f12aa6e7984cf1e40fc2aaa60
+size 9991723306
pytorch_model-00008-of-00008.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:71dae5d9dc918516037467510c6591881953dc26c5d584de8bd92c01f2940a8b
+size 1104304609
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:386c49cf943d71aa110361135338c50e38beeff0a66593480421f37b319e1a39
+size 1033105
vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/.gitattributes
ADDED
@@ -0,0 +1,32 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/README.md
ADDED
@@ -0,0 +1,165 @@
+---
+license: mit
+widget:
+- src: >-
+    https://huggingface.co/datasets/mishig/sample_images/resolve/main/cat-dog-music.png
+  candidate_labels: playing music, playing sports
+  example_title: Cat & Dog
+library_name: open_clip
+pipeline_tag: zero-shot-image-classification
+---
+# Model Card for CLIP ViT-H/14 - LAION-2B
+
+# Table of Contents
+
+1. [Model Details](#model-details)
+2. [Uses](#uses)
+3. [Training Details](#training-details)
+4. [Evaluation](#evaluation)
+5. [Acknowledgements](#acknowledgements)
+6. [Citation](#citation)
+7. [How To Get Started With the Model](#how-to-get-started-with-the-model)
+
+
+# Model Details
+
+## Model Description
+
+A CLIP ViT-H/14 model trained with the LAION-2B English subset of LAION-5B (https://laion.ai/blog/laion-5b/) using OpenCLIP (https://github.com/mlfoundations/open_clip).
+
+Model training done by Romain Beaumont on the [stability.ai](https://stability.ai/) cluster.
+
+# Uses
+
+As per the original [OpenAI CLIP model card](https://github.com/openai/CLIP/blob/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1/model-card.md), this model is intended as a research output for research communities. We hope that this model will enable researchers to better understand and explore zero-shot, arbitrary image classification. We also hope it can be used for interdisciplinary studies of the potential impact of such models.
+
+The OpenAI CLIP paper includes a discussion of potential downstream impacts to provide an example for this sort of analysis. Additionally, the LAION-5B blog (https://laion.ai/blog/laion-5b/) and upcoming paper include additional discussion as it relates specifically to the training dataset.
+
+## Direct Use
+
+Zero-shot image classification, image and text retrieval, among others.
+
+## Downstream Use
+
+Image classification and other image task fine-tuning, linear probe image classification, image generation guiding and conditioning, among others.
+
+## Out-of-Scope Use
+
+As per the OpenAI models,
+
+**Any** deployed use case of the model - whether commercial or not - is currently out of scope. Non-deployed use cases such as image search in a constrained environment are also not recommended unless there is thorough in-domain testing of the model with a specific, fixed class taxonomy. This is because our safety assessment demonstrated a high need for task-specific testing, especially given the variability of CLIP’s performance with different class taxonomies. This makes untested and unconstrained deployment of the model in any use case currently potentially harmful.
+
+Certain use cases which would fall under the domain of surveillance and facial recognition are always out-of-scope regardless of the performance of the model. This is because the use of artificial intelligence for such tasks can currently be premature, given the lack of testing norms and checks to ensure its fair use.
+
+Since the model has not been purposefully trained in or evaluated on any languages other than English, its use should be limited to English language use cases.
+
+Further to the above notice, the LAION-5B dataset used in training of these models has additional considerations; see below.
+
+# Training Details
+
+## Training Data
+
+This model was trained with the 2 billion sample English subset of LAION-5B (https://laion.ai/blog/laion-5b/).
+
+**IMPORTANT NOTE:** The motivation behind dataset creation is to democratize research and experimentation around large-scale multi-modal model training and the handling of uncurated, large-scale datasets crawled from the publicly available internet. Our recommendation is therefore to use the dataset for research purposes. Be aware that this large-scale dataset is uncurated. Keep in mind that the uncurated nature of the dataset means that collected links may lead to strongly discomforting and disturbing content for a human viewer. Therefore, please use the demo links with caution and at your own risk. It is possible to extract a “safe” subset by filtering out samples based on the safety tags (using a customized trained NSFW classifier that we built). While this strongly reduces the chance of encountering potentially harmful content when viewing, we cannot entirely exclude the possibility of harmful content still being present in safe mode, so the warning holds there as well. We think that providing the dataset openly to broad research and other interested communities will allow for transparent investigation of the benefits that come along with training large-scale models, as well as of pitfalls and dangers that may stay unreported or unnoticed when working with closed large datasets that remain restricted to a small community. While providing our dataset openly, we do not, however, recommend using it for creating ready-to-go industrial products, as the basic research about general properties and safety of such large-scale models, which we would like to encourage with this release, is still in progress.
+
+## Training Procedure
+
+Please see the [training notes](https://docs.google.com/document/d/1EFbMLRWSSV0LUf9Du1pWzWqgeiIRPwEWX2s1C6mAk5c) and [wandb logs](https://wandb.ai/rom1504/eval_openclip/reports/H-14--VmlldzoyNDAxODQ3).
+
+# Evaluation
+
+Evaluation was done with the code in the [LAION CLIP Benchmark suite](https://github.com/LAION-AI/CLIP_benchmark).
+
+## Testing Data, Factors & Metrics
+
+### Testing Data
+
+Testing is performed with VTAB+ (a combination of VTAB (https://arxiv.org/abs/1910.04867) with additional robustness datasets) for classification, and with COCO and Flickr for retrieval.
+
+**TODO** - more detail
+
+## Results
+
+The model achieves a 78.0% zero-shot top-1 accuracy on ImageNet-1k.
+
+An initial round of benchmarks has been performed on a wider range of datasets, currently viewable at https://github.com/LAION-AI/CLIP_benchmark/blob/main/benchmark/results.ipynb
+
+**TODO** - create a table for just this model's metrics.
+
+# Acknowledgements
+
+Acknowledging [stability.ai](https://stability.ai/) for the compute used to train this model.
+
+# Citation
+
+**BibTeX:**
+
+LAION-5B
+```bibtex
+@inproceedings{schuhmann2022laionb,
+  title={{LAION}-5B: An open large-scale dataset for training next generation image-text models},
+  author={Christoph Schuhmann and
+          Romain Beaumont and
+          Richard Vencu and
+          Cade W Gordon and
+          Ross Wightman and
+          Mehdi Cherti and
+          Theo Coombes and
+          Aarush Katta and
+          Clayton Mullis and
+          Mitchell Wortsman and
+          Patrick Schramowski and
+          Srivatsa R Kundurthy and
+          Katherine Crowson and
+          Ludwig Schmidt and
+          Robert Kaczmarczyk and
+          Jenia Jitsev},
+  booktitle={Thirty-sixth Conference on Neural Information Processing Systems Datasets and Benchmarks Track},
+  year={2022},
+  url={https://openreview.net/forum?id=M3Y74vmsMcY}
+}
+```
+
+OpenAI CLIP paper
+```
+@inproceedings{Radford2021LearningTV,
+  title={Learning Transferable Visual Models From Natural Language Supervision},
+  author={Alec Radford and Jong Wook Kim and Chris Hallacy and A. Ramesh and Gabriel Goh and Sandhini Agarwal and Girish Sastry and Amanda Askell and Pamela Mishkin and Jack Clark and Gretchen Krueger and Ilya Sutskever},
+  booktitle={ICML},
+  year={2021}
+}
+```
+
+OpenCLIP software
+```
+@software{ilharco_gabriel_2021_5143773,
+  author       = {Ilharco, Gabriel and
+                  Wortsman, Mitchell and
+                  Wightman, Ross and
+                  Gordon, Cade and
+                  Carlini, Nicholas and
+                  Taori, Rohan and
+                  Dave, Achal and
+                  Shankar, Vaishaal and
+                  Namkoong, Hongseok and
+                  Miller, John and
+                  Hajishirzi, Hannaneh and
+                  Farhadi, Ali and
+                  Schmidt, Ludwig},
+  title        = {OpenCLIP},
+  month        = jul,
+  year         = 2021,
+  note         = {If you use this software, please cite it as below.},
+  publisher    = {Zenodo},
+  version      = {0.1},
+  doi          = {10.5281/zenodo.5143773},
+  url          = {https://doi.org/10.5281/zenodo.5143773}
+}
+```
+
+# How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+** TODO ** - Hugging Face transformers, OpenCLIP, and timm getting started snippets
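The model card above leaves its getting-started snippets as a TODO. For reference, here is a minimal zero-shot classification sketch with OpenCLIP; it assumes the upstream `ViT-H-14` / `laion2b_s32b_b79k` checkpoint that this card describes (the resized 448px copy vendored under `vit/` would need a local checkpoint path instead), and the image file name is hypothetical, echoing the widget example in the front matter.

```python
import torch
from PIL import Image
import open_clip

# Upstream LAION-2B ViT-H/14 weights; swap in a local checkpoint for the 448px variant.
model, _, preprocess = open_clip.create_model_and_transforms(
    "ViT-H-14", pretrained="laion2b_s32b_b79k"
)
tokenizer = open_clip.get_tokenizer("ViT-H-14")
model.eval()

image = preprocess(Image.open("cat-dog-music.png")).unsqueeze(0)  # hypothetical local image
text = tokenizer(["playing music", "playing sports"])

with torch.no_grad():
    image_features = model.encode_image(image)
    text_features = model.encode_text(text)
    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)
    probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)

print(probs)  # one probability per candidate label
```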
vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/config.json
ADDED
@@ -0,0 +1,23 @@
+{
+  "_name_or_path": "clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448",
+  "architectures": [
+    "CLIPVisionModel"
+  ],
+  "attention_dropout": 0.0,
+  "dropout": 0.0,
+  "hidden_act": "gelu",
+  "hidden_size": 1280,
+  "image_size": 448,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "intermediate_size": 5120,
+  "layer_norm_eps": 1e-05,
+  "model_type": "clip_vision_model",
+  "num_attention_heads": 16,
+  "num_channels": 3,
+  "num_hidden_layers": 32,
+  "patch_size": 14,
+  "projection_dim": 1024,
+  "torch_dtype": "float32",
+  "transformers_version": "4.34.0"
+}
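This config describes a standalone `CLIPVisionModel` vision tower (hidden size 1280, 32 layers, patch size 14) at a 448x448 input resolution. A minimal sketch of loading it with `transformers`, assuming the directory layout added in this commit loads cleanly; the image file is hypothetical.

```python
import torch
from PIL import Image
from transformers import CLIPImageProcessor, CLIPVisionModel

# Path of the vision tower as added in this commit.
vit_dir = "vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448"

processor = CLIPImageProcessor.from_pretrained(vit_dir)
model = CLIPVisionModel.from_pretrained(vit_dir)
model.eval()

inputs = processor(images=Image.open("example.jpg"), return_tensors="pt")  # hypothetical image
with torch.no_grad():
    outputs = model(**inputs)

# One embedding per image patch plus the class token:
# (1, 1 + (448 // 14) ** 2, 1280) == (1, 1025, 1280)
print(outputs.last_hidden_state.shape)
```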
vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/merges.txt
ADDED
The diff for this file is too large to render.
vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/open_clip_config.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "model_cfg": {
+    "embed_dim": 1024,
+    "vision_cfg": {
+      "image_size": 224,
+      "layers": 32,
+      "width": 1280,
+      "head_width": 80,
+      "patch_size": 14
+    },
+    "text_cfg": {
+      "context_length": 77,
+      "vocab_size": 49408,
+      "width": 1024,
+      "heads": 16,
+      "layers": 24
+    }
+  },
+  "preprocess_cfg": {
+    "mean": [
+      0.48145466,
+      0.4578275,
+      0.40821073
+    ],
+    "std": [
+      0.26862954,
+      0.26130258,
+      0.27577711
+    ]
+  }
+}
vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/open_clip_pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a78ef8e8c73fd0df621682e7a8e8eb36c6916cb3c16b291a082ecd52ab79cc4
+size 3944692325
vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/preprocessor_config.json
ADDED
@@ -0,0 +1,19 @@
+{
+  "crop_size": 448,
+  "do_center_crop": true,
+  "do_normalize": true,
+  "do_resize": true,
+  "feature_extractor_type": "CLIPFeatureExtractor",
+  "image_mean": [
+    0.48145466,
+    0.4578275,
+    0.40821073
+  ],
+  "image_std": [
+    0.26862954,
+    0.26130258,
+    0.27577711
+  ],
+  "resample": 3,
+  "size": 448
+}
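The preprocessing this file encodes is the standard CLIP pipeline at 448px: bicubic resize (`resample: 3` is PIL's BICUBIC), a 448 center crop, and normalization with the OpenAI CLIP mean/std. A roughly equivalent torchvision pipeline is sketched below; it is an approximation for reference, not the exact processor implementation, and the image file is hypothetical.

```python
from PIL import Image
from torchvision import transforms

# Mirrors preprocessor_config.json: bicubic resize and center crop to 448, CLIP mean/std.
clip_448_preprocess = transforms.Compose([
    transforms.Resize(448, interpolation=transforms.InterpolationMode.BICUBIC),
    transforms.CenterCrop(448),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],
                         std=[0.26862954, 0.26130258, 0.27577711]),
])

pixel_values = clip_448_preprocess(Image.open("example.jpg").convert("RGB"))  # hypothetical image
print(pixel_values.shape)  # torch.Size([3, 448, 448])
```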
vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fa0c1b4a16fa93bd8bb53e5190e8624dc6f5de5175b9c6ebf02cc5c545247e6a
+size 2527168934
vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": {"content": "<|startoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": "<|endoftext|>"}
vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/tokenizer.json
ADDED
The diff for this file is too large to render.
vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/tokenizer_config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "single_word": false,
+    "lstrip": false,
+    "rstrip": false,
+    "normalized": true,
+    "__type": "AddedToken"
+  },
+  "bos_token": {
+    "content": "<|startoftext|>",
+    "single_word": false,
+    "lstrip": false,
+    "rstrip": false,
+    "normalized": true,
+    "__type": "AddedToken"
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "single_word": false,
+    "lstrip": false,
+    "rstrip": false,
+    "normalized": true,
+    "__type": "AddedToken"
+  },
+  "pad_token": "<|endoftext|>",
+  "add_prefix_space": false,
+  "errors": "replace",
+  "do_lower_case": true,
+  "name_or_path": "openai/clip-vit-base-patch32",
+  "model_max_length": 77,
+  "special_tokens_map_file": "./special_tokens_map.json",
+  "tokenizer_class": "CLIPTokenizer"
+}
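The tokenizer files committed alongside this config form the standard CLIP BPE tokenizer with a 77-token context length. A minimal sketch, assuming the vendored files load with `transformers`' `CLIPTokenizer`; the candidate labels echo the widget example in the README.

```python
from transformers import CLIPTokenizer

# Tokenizer directory as added in this commit.
vit_dir = "vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448"
tokenizer = CLIPTokenizer.from_pretrained(vit_dir)

# CLIP text inputs are padded/truncated to the 77-token context length.
batch = tokenizer(
    ["playing music", "playing sports"],
    padding="max_length",
    truncation=True,
    return_tensors="pt",
)
print(batch["input_ids"].shape)  # torch.Size([2, 77])
```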
vit/clip-vit-H-14-laion2B-s32B-b79K-yi-vl-34B-448/vocab.json
ADDED
The diff for this file is too large to render.