Add adapter bert-base-multilingual-cased_bh_wiki_pfeiffer version 1
- README.md +69 -0
- adapter_config.json +41 -0
- head_config.json +19 -0
- pytorch_adapter.bin +3 -0
- pytorch_model_head.bin +3 -0
README.md
ADDED
@@ -0,0 +1,69 @@
---
tags:
- fill-mask
- bert
- adapter-transformers
- adapterhub:bh/wiki
language:
- bh
license: "apache-2.0"
---

# Adapter `bert-base-multilingual-cased_bh_wiki_pfeiffer` for bert-base-multilingual-cased

An adapter for the `bert-base-multilingual-cased` model that was trained on the [bh/wiki](https://adapterhub.ml/explore/bh/wiki/) dataset and includes a prediction head for masked lm.

**This adapter was created for usage with the [Adapters](https://github.com/Adapter-Hub/adapters) library.**

## Usage

First, install `adapters`:

```
pip install -U adapters
```

Now, the adapter can be loaded and activated like this:

```python
from adapters import AutoAdapterModel

model = AutoAdapterModel.from_pretrained("bert-base-multilingual-cased")
adapter_name = model.load_adapter("AdapterHub/bert-base-multilingual-cased_bh_wiki_pfeiffer")
model.set_active_adapters(adapter_name)
```
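
With the adapter and its masked-lm head active, the model can be used for fill-mask prediction. The following is a minimal sketch: the example sentence and variable names are illustrative only, and it assumes the loaded masked-lm head returns standard per-token `logits`.

```python
from transformers import AutoTokenizer
import torch

tokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")

# Illustrative input with a single [MASK] token (continues from `model` above)
inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)  # the masked-lm head produces per-token vocabulary logits

# Take the highest-scoring token at the mask position
mask_pos = (inputs["input_ids"] == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
predicted_id = outputs.logits[0, mask_pos].argmax(dim=-1)
print(tokenizer.decode(predicted_id))
```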

## Architecture & Training

- Adapter architecture: pfeiffer (an equivalent configuration is sketched below)
- Prediction head: masked lm
- Dataset: [bh/wiki](https://adapterhub.ml/explore/bh/wiki/)
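
As a reference for retraining, a roughly equivalent language adapter could be set up with the `adapters` library as sketched below. This is an assumption-laden sketch derived from `adapter_config.json` in this repository (pfeiffer bottleneck, reduction factor 2, invertible `nice` adapter): the class name `SeqBnInvConfig`, the head method, and the keyword names reflect the current `adapters` API as I understand it, and data loading and the training loop are omitted.

```python
from adapters import AutoAdapterModel, SeqBnInvConfig

model = AutoAdapterModel.from_pretrained("bert-base-multilingual-cased")

# Pfeiffer-style bottleneck adapter plus an invertible (MAD-X) adapter,
# mirroring the key fields stored in adapter_config.json
config = SeqBnInvConfig(
    reduction_factor=2,
    non_linearity="gelu_new",
    inv_adapter="nice",
    inv_adapter_reduction_factor=2,
)

model.add_adapter("bh", config=config)  # fresh, untrained adapter named "bh"
model.add_masked_lm_head("bh")          # masked-lm head, as in head_config.json
model.train_adapter("bh")               # freeze the base model; train only the adapter
```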

## Author Information

- Author name(s): Jonas Pfeiffer
- Author email: [email protected]
- Author links: [Website](https://pfeiffer.ai), [GitHub](https://github.com/JoPfeiff), [Twitter](https://twitter.com/@PfeiffJo)

## Citation

```bibtex
@inproceedings{pfeiffer20madx,
    title = "{MAD-X}: {A}n {A}dapter-{B}ased {F}ramework for {M}ulti-{T}ask {C}ross-{L}ingual {T}ransfer",
    author = "Pfeiffer, Jonas and
      Vuli{\'c}, Ivan and
      Gurevych, Iryna and
      Ruder, Sebastian",
    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.emnlp-main.617",
    pages = "7654--7673",
}
```

*This adapter has been auto-imported from https://github.com/Adapter-Hub/Hub/blob/master/adapters/ukp/bert-base-multilingual-cased_bh_wiki_pfeiffer.yaml*.
adapter_config.json
ADDED
@@ -0,0 +1,41 @@
{
  "config": {
    "adapter_residual_before_ln": false,
    "cross_adapter": false,
    "dropout": 0.0,
    "factorized_phm_W": true,
    "factorized_phm_rule": false,
    "hypercomplex_nonlinearity": "glorot-uniform",
    "init_weights": "bert",
    "inv_adapter": "nice",
    "inv_adapter_reduction_factor": 2,
    "is_parallel": false,
    "learn_phm": true,
    "leave_out": [],
    "ln_after": false,
    "ln_before": false,
    "mh_adapter": false,
    "non_linearity": "gelu_new",
    "original_ln_after": true,
    "original_ln_before": true,
    "output_adapter": true,
    "phm_bias": true,
    "phm_c_init": "normal",
    "phm_dim": 4,
    "phm_init_range": 0.0001,
    "phm_layer": false,
    "phm_rank": 1,
    "reduction_factor": 2,
    "residual_before_ln": true,
    "scaling": 1.0,
    "shared_W_phm": false,
    "shared_phm_rule": true,
    "use_gating": false
  },
  "hidden_size": 768,
  "model_class": "BertAdapterModel",
  "model_name": "bert-base-multilingual-cased",
  "model_type": "bert",
  "name": "bh",
  "version": "0.2.0"
}
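
The stored configuration above can be turned back into a config object with the `adapters` library, for example to add a fresh adapter with the same architecture. The snippet below is an illustrative sketch: the local file path is assumed, and it assumes `AdapterConfig.load` accepts a plain config dict.

```python
import json
from adapters import AdapterConfig

# Path is illustrative; point it at the downloaded adapter_config.json
with open("adapter_config.json") as f:
    stored = json.load(f)

# Build a config object from the flat "config" dict stored in the file
adapter_config = AdapterConfig.load(stored["config"])
print(type(adapter_config).__name__, adapter_config.reduction_factor)
```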
head_config.json
ADDED
@@ -0,0 +1,19 @@
{
  "config": {
    "activation_function": "gelu",
    "bias": true,
    "embedding_size": 768,
    "head_type": "masked_lm",
    "label2id": null,
    "layer_norm": true,
    "layers": 2,
    "shift_labels": false,
    "vocab_size": 119547
  },
  "hidden_size": 768,
  "model_class": "BertAdapterModel",
  "model_name": "bert-base-multilingual-cased",
  "model_type": "bert",
  "name": "bh",
  "version": "0.2.0"
}
pytorch_adapter.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fe786b166d5e93d03311c97ed4398bd0c3a714746372633d5437371db1c26a32
size 29570318
pytorch_model_head.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3d09a98653f7eed8dc6fb7f07914431838078dcef2ab0ee384f230f62587a838
size 370097718