zhichao-geng committed
Commit 4a6d358 · verified · 1 Parent(s): c519dc6

sentence_transformers_support (#5)

- Add support for Sentence Transformer (4adc322c5ebf1bb00fd0265139aa6c0ce4237630)
- Add model files using Git LFS (37f8b09ff8c91099b22abb645a495ec12a4d313a)
- Update README.md (cf44a30a23603deab9116af967447f7e96674b84)

README.md CHANGED

@@ -9,6 +9,14 @@ tags:
 - passage-retrieval
 - document-expansion
 - bag-of-words
+- sentence-transformers
+- sparse-encoder
+- sparse
+- asymmetric
+- inference-free
+- splade
+pipeline_tag: feature-extraction
+library_name: sentence-transformers
 ---
 
 # opensearch-neural-sparse-encoding-doc-v1
@@ -36,6 +44,52 @@ This model is trained on MS MARCO dataset.
 
 OpenSearch's neural sparse feature supports learned sparse retrieval with a Lucene inverted index. Link: https://opensearch.org/docs/latest/query-dsl/specialized/neural-sparse/. Indexing and search can be performed with the OpenSearch high-level API.
 
+## Usage (Sentence Transformers)
+
+First install the Sentence Transformers library:
+
+```bash
+pip install -U sentence-transformers
+```
+
+Then you can load this model and run inference.
+
+```python
+from sentence_transformers.sparse_encoder import SparseEncoder
+
+# Download from the 🤗 Hub
+model = SparseEncoder("opensearch-project/opensearch-neural-sparse-encoding-doc-v1")
+
+query = "What's the weather in ny now?"
+document = "Currently New York is rainy."
+
+query_embed = model.encode_query(query)
+document_embed = model.encode_document(document)
+
+sim = model.similarity(query_embed, document_embed)
+print(f"Similarity: {sim}")
+# Similarity: tensor([[12.8465]])
+
+# Visualize the top tokens for each text
+top_k = 3
+print(f"\nTop {top_k} tokens for each text:")
+
+decoded_query = model.decode(query_embed, top_k=top_k)
+decoded_document = model.decode(document_embed)
+
+for i in range(top_k):
+    query_token, query_score = decoded_query[i]
+    doc_score = next((score for token, score in decoded_document if token == query_token), 0)
+    if doc_score != 0:
+        print(f"Token: {query_token}, Query score: {query_score:.4f}, Document score: {doc_score:.4f}")
+
+# Top 3 tokens for each text:
+# Token: ny, Query score: 5.7729, Document score: 1.0552
+# Token: weather, Query score: 4.5684, Document score: 1.1697
+# Token: now, Query score: 3.5895, Document score: 0.3932
+```
+
 ## Usage (HuggingFace)
 This model is supposed to run inside an OpenSearch cluster, but you can also use it outside the cluster with the HuggingFace models API.
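The README's pointer to the OpenSearch high-level API is the other half of the story: once the model is deployed in a cluster, retrieval goes through the `neural_sparse` query type from the linked docs. As a minimal sketch (assuming the `opensearch-py` client, an index `my-index` with a `rank_features` field `passage_embedding`, and a deployed model ID; these names are illustrative, not taken from this commit):

```python
# Sketch only: index name, field name, and model_id are assumptions.
from opensearchpy import OpenSearch

client = OpenSearch(hosts=[{"host": "localhost", "port": 9200}])

response = client.search(
    index="my-index",
    body={
        "query": {
            "neural_sparse": {
                "passage_embedding": {          # rank_features field holding doc-side sparse vectors
                    "query_text": "What's the weather in ny now?",
                    "model_id": "<model_id>",   # ID of the sparse encoder deployed in the cluster
                }
            }
        }
    },
)
print(response["hits"]["hits"])
```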
 
config_sentence_transformers.json ADDED
@@ -0,0 +1,14 @@
+{
+  "model_type": "SparseEncoder",
+  "__version__": {
+    "sentence_transformers": "5.0.0",
+    "transformers": "4.50.3",
+    "pytorch": "2.6.0+cu124"
+  },
+  "prompts": {
+    "query": "",
+    "document": ""
+  },
+  "default_prompt_name": null,
+  "similarity_fn_name": "dot"
+}
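Two details in this config are worth calling out: `"similarity_fn_name": "dot"` makes `model.similarity` a plain dot product over the sparse vectors, and the empty `prompts` mean no prefix strings are prepended to queries or documents. A quick sanity check, as a sketch built on the README's example:

```python
import torch
from sentence_transformers.sparse_encoder import SparseEncoder

model = SparseEncoder("opensearch-project/opensearch-neural-sparse-encoding-doc-v1")

q = model.encode_query("What's the weather in ny now?")
d = model.encode_document("Currently New York is rainy.")

# "similarity_fn_name": "dot" -> similarity is a raw dot product of the vectors.
manual = float((q.to_dense() * d.to_dense()).sum())
print(manual, float(model.similarity(q, d)))  # both should print the same score
```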
document_1_SpladePooling/config.json ADDED
@@ -0,0 +1,5 @@
+{
+  "pooling_strategy": "max",
+  "activation_function": "relu",
+  "word_embedding_dimension": null
+}
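For orientation: `"pooling_strategy": "max"` with `"activation_function": "relu"` is the standard SPLADE document-side recipe, where each vocabulary weight is the max over token positions of a log-saturated ReLU of the MLM logits. A self-contained sketch of that math (it mirrors the module's behavior as I understand it, rather than calling the library):

```python
import torch

def splade_max_pooling(logits: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    """SPLADE pooling: weight[v] = max over positions i of log(1 + relu(logits[i, v])).

    logits:         (batch, seq_len, vocab_size) MLM head outputs
    attention_mask: (batch, seq_len), 1 for real tokens, 0 for padding
    """
    activated = torch.log1p(torch.relu(logits))
    activated = activated * attention_mask.unsqueeze(-1)  # zero out padding positions
    return activated.max(dim=1).values                    # (batch, vocab_size)
```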
modules.json ADDED
@@ -0,0 +1,8 @@
+[
+  {
+    "idx": 0,
+    "name": "0",
+    "path": "",
+    "type": "sentence_transformers.models.Router"
+  }
+]
query_0_IDF/config.json ADDED
@@ -0,0 +1,3 @@
+{
+  "frozen": true
+}
query_0_IDF/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:711ec64837a7962d2ae106996079782b7ee87860089a0b2348bf7cb840f252d3
+size 122168
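`"frozen": true` pins the IDF weights so they stay fixed during any fine-tuning, and the safetensors file above holds one weight per vocabulary entry. The practical consequence is that query encoding needs no transformer forward pass at all: a query becomes a bag of its tokens, each weighted by a pre-computed IDF value. A sketch of that idea (the `idf` tensor below is a placeholder for the real weights in `query_0_IDF/model.safetensors`):

```python
import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "opensearch-project/opensearch-neural-sparse-encoding-doc-v1"
)
# Placeholder for the frozen per-token IDF vector shipped in model.safetensors.
idf = torch.ones(tokenizer.vocab_size)

def encode_query_idf(text: str) -> torch.Tensor:
    """Inference-free query encoding: tokenize, then look up frozen IDF weights."""
    ids = torch.tensor(tokenizer(text, add_special_tokens=False)["input_ids"])
    vec = torch.zeros(tokenizer.vocab_size)
    vec[ids] = idf[ids]  # each query token gets its IDF as the sparse weight
    return vec
```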
query_0_IDF/special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+{
+  "cls_token": {
+    "content": "[CLS]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": {
+    "content": "[MASK]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "[PAD]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": {
+    "content": "[SEP]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "[UNK]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
query_0_IDF/tokenizer.json ADDED
The diff for this file is too large to render.
 
query_0_IDF/tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "extra_special_tokens": {},
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
query_0_IDF/vocab.txt ADDED
The diff for this file is too large to render.
 
router_config.json ADDED
@@ -0,0 +1,20 @@
+{
+  "types": {
+    "query_0_IDF": "sentence_transformers.sparse_encoder.models.IDF.IDF",
+    "": "sentence_transformers.sparse_encoder.models.MLMTransformer.MLMTransformer",
+    "document_1_SpladePooling": "sentence_transformers.sparse_encoder.models.SpladePooling.SpladePooling"
+  },
+  "structure": {
+    "query": [
+      "query_0_IDF"
+    ],
+    "document": [
+      "",
+      "document_1_SpladePooling"
+    ]
+  },
+  "parameters": {
+    "default_route": "document",
+    "allow_empty_key": true
+  }
+}
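Read together with `modules.json`, this wires up the model's asymmetry: the `query` route runs only the IDF lookup, while the `document` route runs the MLMTransformer (the empty-string module at the repo root, permitted by `allow_empty_key`) followed by SpladePooling; `"default_route": "document"` makes plain `encode()` behave like `encode_document()`. One visible effect, sketched below, is that document vectors typically activate many more vocabulary entries than the expansion-free query vectors:

```python
from sentence_transformers.sparse_encoder import SparseEncoder

model = SparseEncoder("opensearch-project/opensearch-neural-sparse-encoding-doc-v1")

# "query" route: IDF lookup only. "document" route: MLMTransformer + SpladePooling.
q = model.encode_query("What's the weather in ny now?")
d = model.encode_document("Currently New York is rainy.")

# The document side can expand beyond its literal tokens; the query side cannot.
print("query non-zeros:   ", int((q.to_dense() != 0).sum()))
print("document non-zeros:", int((d.to_dense() != 0).sum()))
```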
sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
+{
+  "max_seq_length": 512,
+  "do_lower_case": false
+}
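`"max_seq_length": 512` caps inputs at 512 wordpieces, so longer passages are truncated on the document side. If that matters for a corpus, the cap can be lowered at runtime via the standard sentence-transformers attribute, as in this sketch:

```python
from sentence_transformers.sparse_encoder import SparseEncoder

model = SparseEncoder("opensearch-project/opensearch-neural-sparse-encoding-doc-v1")
model.max_seq_length = 256  # shorter cap: faster document encoding, earlier truncation
```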