koziev ilya committed
Commit · 8213f08
1 Parent(s): faccfc2
first release of the tiny model for paraphrase detection
Browse files
- 1_Pooling/config.json +7 -0
- README.md +57 -0
- config.json +27 -0
- config_sentence_transformers.json +7 -0
- modules.json +14 -0
- pytorch_model.bin +3 -0
- sentence_bert_config.json +4 -0
- special_tokens_map.json +7 -0
- tokenizer.json +0 -0
- tokenizer_config.json +16 -0
- vocab.txt +0 -0
1_Pooling/config.json
ADDED
@@ -0,0 +1,7 @@
{
  "word_embedding_dimension": 312,
  "pooling_mode_cls_token": false,
  "pooling_mode_mean_tokens": true,
  "pooling_mode_max_tokens": false,
  "pooling_mode_mean_sqrt_len_tokens": false
}
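
This pooling head averages the 312-dimensional token embeddings into a single sentence vector (mean pooling; CLS and max pooling are disabled). A minimal sketch of the same computation done by hand with plain transformers, assuming torch and transformers are installed and using the repository id inkoziev/sbert_synonymy from the README below:

```
import torch
from transformers import AutoTokenizer, AutoModel

# load the tokenizer and the BERT encoder released in this commit
tokenizer = AutoTokenizer.from_pretrained('inkoziev/sbert_synonymy')
encoder = AutoModel.from_pretrained('inkoziev/sbert_synonymy')

enc = tokenizer(["Кошка ловит мышку."], padding=True, return_tensors='pt')
with torch.no_grad():
    token_embeddings = encoder(**enc).last_hidden_state  # (batch, seq_len, 312)

# mean pooling: average the token embeddings, ignoring padding positions
mask = enc['attention_mask'].unsqueeze(-1).float()
sentence_embedding = (token_embeddings * mask).sum(dim=1) / mask.sum(dim=1)
print(sentence_embedding.shape)  # torch.Size([1, 312])
```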
README.md
CHANGED
@@ -1,3 +1,60 @@
---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
language: ru
license: unlicense

widget:
- source_sentence: "Кошка ловит мышку"
  sentences: ["Мышка преследуема кошкой", "Мышка ловит кузнечика", "Кошка ловит кайф"]
---

# SBERT_SYNONYMY

This is a [sentence-transformers](https://www.SBERT.net) model for determining whether two short texts are synonymous (paraphrases of each other), primarily single sentences of up to 10-15 words.

The model computes a 312-dimensional vector for each of the two texts. The cosine of the angle between these vectors gives an estimate of how close they are in meaning. In the [dialogue system project](https://github.com/Koziev/chatbot) it is used to detect paraphrases of utterances and facts.

The model is based on [cointegrated/rubert-tiny2](https://huggingface.co/cointegrated/rubert-tiny2). It is very small and runs inference quickly even on a CPU.

## Usage with the Sentence-Transformers library

For convenience, install [sentence-transformers](https://www.SBERT.net):

```
pip install -U sentence-transformers
```

To measure the synonymy of a single pair of sentences, you can use code like this:

```
import sentence_transformers

sentences = ["Кошка ловит мышку.", "Мышка преследуема кошкой."]

model = sentence_transformers.SentenceTransformer('inkoziev/sbert_synonymy')
embeddings = model.encode(sentences)

s = sentence_transformers.util.cos_sim(a=embeddings[0], b=embeddings[1])
print('text1={} text2={} cossim={}'.format(sentences[0], sentences[1], s))
```
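
To score one utterance against several candidate phrasings at once (as in the widget example above), `cos_sim` also accepts batches of embeddings. A small sketch; the 0.5 cut-off below is only an illustrative value, not a calibrated threshold:

```
import sentence_transformers

model = sentence_transformers.SentenceTransformer('inkoziev/sbert_synonymy')

query = "Кошка ловит мышку"
candidates = ["Мышка преследуема кошкой", "Мышка ловит кузнечика", "Кошка ловит кайф"]

# cos_sim returns a (1, len(candidates)) matrix of cosine similarities
sims = sentence_transformers.util.cos_sim(model.encode([query]), model.encode(candidates))[0]
for candidate, sim in zip(candidates, sims):
    verdict = 'paraphrase' if sim.item() > 0.5 else 'not a paraphrase'
    print('{}\t{:.3f}\t{}'.format(candidate, sim.item(), verdict))
```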

## Contacts and Citation

```
@MISC{sbert_synonymy,
  author = {Ilya Koziev},
  title = {Paraphrase Detection Model},
  url = {https://huggingface.co/inkoziev/sbert_synonymy},
  year = 2022
}
```
config.json
ADDED
@@ -0,0 +1,27 @@
{
  "_name_or_path": "cointegrated/rubert-tiny2",
  "architectures": [
    "BertModel"
  ],
  "attention_probs_dropout_prob": 0.1,
  "classifier_dropout": null,
  "emb_size": 312,
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 312,
  "initializer_range": 0.02,
  "intermediate_size": 600,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 2048,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 3,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "torch_dtype": "float32",
  "transformers_version": "4.21.3",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 83828
}
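
This is the rubert-tiny2 encoder configuration: 3 hidden layers, hidden size 312, feed-forward size 600 and an 83,828-token vocabulary, which comes out to roughly 29 million float32 parameters (consistent with the ~117 MB pytorch_model.bin below). A quick way to check those numbers, as a sketch assuming transformers and torch are installed:

```
from transformers import AutoConfig, AutoModel

config = AutoConfig.from_pretrained('inkoziev/sbert_synonymy')
print(config.num_hidden_layers, config.hidden_size, config.vocab_size)  # 3 312 83828

# count the parameters of the underlying BertModel
model = AutoModel.from_pretrained('inkoziev/sbert_synonymy')
print(sum(p.numel() for p in model.parameters()))  # on the order of 29 million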
config_sentence_transformers.json
ADDED
@@ -0,0 +1,7 @@
{
  "__version__": {
    "sentence_transformers": "2.0.0",
    "transformers": "4.21.3",
    "pytorch": "1.8.0.dev20201224+cu110"
  }
}
modules.json
ADDED
@@ -0,0 +1,14 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  }
]
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f7a53423f99139f8108ade47dafc9da238bdd3a4c2060ea5844482ed3702493b
size 116809361
sentence_bert_config.json
ADDED
@@ -0,0 +1,4 @@
{
  "max_seq_length": 2048,
  "do_lower_case": false
}
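
sentence-transformers reads max_seq_length from this file, so inputs longer than 2048 tokens are truncated when encoding. A sketch of how to inspect or lower that limit; the value 128 below is only an illustration:

```
import sentence_transformers

model = sentence_transformers.SentenceTransformer('inkoziev/sbert_synonymy')
print(model.max_seq_length)  # 2048, taken from sentence_bert_config.json

# optionally cap the limit to speed up encoding of short sentences (illustrative value)
model.max_seq_length = 128
```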
special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
{
  "cls_token": "[CLS]",
  "mask_token": "[MASK]",
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "unk_token": "[UNK]"
}
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
ADDED
@@ -0,0 +1,16 @@
{
  "cls_token": "[CLS]",
  "do_basic_tokenize": true,
  "do_lower_case": false,
  "mask_token": "[MASK]",
  "model_max_length": 2048,
  "name_or_path": "cointegrated/rubert-tiny2",
  "never_split": null,
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "special_tokens_map_file": null,
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "unk_token": "[UNK]"
}
vocab.txt
ADDED
The diff for this file is too large to render.
See raw diff