Shitao committed on
Commit 0537228
1 Parent(s): 3806044

Update README.md

Files changed (1)
  1. README.md +16 -15
README.md CHANGED

@@ -4,7 +4,7 @@ tags:
 - sentence-transformers
 - feature-extraction
 - sentence-similarity
-
+license: mit
 ---
 
 For more details please refer to our github repo: https://github.com/FlagOpen/FlagEmbedding
@@ -84,15 +84,16 @@ pip install -U FlagEmbedding
 from FlagEmbedding import BGEM3FlagModel
 
 model = BGEM3FlagModel('BAAI/bge-m3',
-                       batch_size=12, #
-                       max_length=8192, # If you don't need such a long length, you can set a smaller value to speed up the encoding process.
                        use_fp16=True) # Setting use_fp16 to True speeds up computation with a slight performance degradation
 
 sentences_1 = ["What is BGE M3?", "Definition of BM25"]
 sentences_2 = ["BGE M3 is an embedding model supporting dense retrieval, lexical matching and multi-vector interaction.",
                "BM25 is a bag-of-words retrieval function that ranks a set of documents based on the query terms appearing in each document"]
 
-embeddings_1 = model.encode(sentences_1)['dense_vecs']
+embeddings_1 = model.encode(sentences_1,
+                            batch_size=12,
+                            max_length=8192, # If you don't need such a long length, you can set a smaller value to speed up the encoding process.
+                            )['dense_vecs']
 embeddings_2 = model.encode(sentences_2)['dense_vecs']
 similarity = embeddings_1 @ embeddings_2.T
 print(similarity)
@@ -162,13 +163,17 @@ sentences_2 = ["BGE M3 is an embedding model supporting dense retrieval, lexical
                "BM25 is a bag-of-words retrieval function that ranks a set of documents based on the query terms appearing in each document"]
 
 sentence_pairs = [[i,j] for i in sentences_1 for j in sentences_2]
-print(model.compute_score(sentence_pairs))
+
+print(model.compute_score(sentence_pairs,
+                          max_passage_length=128, # a smaller max length leads to a lower latency
+                          weights_for_different_modes=[0.4, 0.2, 0.4])) # weights_for_different_modes(w) is used to do weighted sum: w[0]*dense_score + w[1]*sparse_score + w[2]*colbert_score
+
 # {
-#   'colbert': [0.7796499729156494, 0.4621465802192688, 0.4523794651031494, 0.7898575067520142],
-#   'sparse': [0.05865478515625, 0.0026397705078125, 0.0, 0.0540771484375],
-#   'dense': [0.6259765625, 0.347412109375, 0.349853515625, 0.67822265625],
-#   'sparse+dense': [0.5266395211219788, 0.2692706882953644, 0.2691181004047394, 0.563307523727417],
-#   'colbert+sparse+dense': [0.6366440653800964, 0.3531297743320465, 0.3487969636917114, 0.6618075370788574]
+#   'colbert': [0.7796499729156494, 0.4621465802192688, 0.4523794651031494, 0.7898575067520142],
+#   'sparse': [0.195556640625, 0.00879669189453125, 0.0, 0.1802978515625],
+#   'dense': [0.6259765625, 0.347412109375, 0.349853515625, 0.67822265625],
+#   'sparse+dense': [0.482503205537796, 0.23454029858112335, 0.2332356721162796, 0.5122477412223816],
+#   'colbert+sparse+dense': [0.6013619303703308, 0.3255828022956848, 0.32089319825172424, 0.6232916116714478]
 # }
 ```
 
@@ -220,8 +225,4 @@ If you find this repository useful, please consider giving a star :star: and cit
 
 ```
 
-```
-
-
-
-
+```
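The substance of this commit is that batch_size and max_length move from the BGEM3FlagModel constructor to encode(), so they can be chosen per call. Below is a minimal sketch of the resulting usage, assuming the post-commit API shown in the diff; the queries/passages lists and the per-call max_length values are illustrative, not from the README:

```python
from FlagEmbedding import BGEM3FlagModel

model = BGEM3FlagModel('BAAI/bge-m3', use_fp16=True)  # fp16: faster, slight accuracy cost

queries = ["What is BGE M3?"]                                   # hypothetical short inputs
passages = ["BGE M3 is an embedding model supporting dense "
            "retrieval, lexical matching and multi-vector interaction."]  # hypothetical long inputs

# batch_size and max_length are now per-call arguments: short queries can use a
# small max_length for speed, while passages keep the full 8192-token window.
q_vecs = model.encode(queries, batch_size=12, max_length=512)['dense_vecs']
p_vecs = model.encode(passages, batch_size=12, max_length=8192)['dense_vecs']

print(q_vecs @ p_vecs.T)  # dense similarity, as in the README example
```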
 
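The comment on weights_for_different_modes pins down the combination rule, and the composite scores in the updated output can be reproduced from the per-mode scores. Here is a small plain-Python sanity check using the values printed above; note that the renormalization by w[0] + w[1] for 'sparse+dense' is inferred from the numbers, since the diff itself only states the three-way weighted sum:

```python
# Per-mode scores copied from the updated README output above.
dense   = [0.6259765625, 0.347412109375, 0.349853515625, 0.67822265625]
sparse  = [0.195556640625, 0.00879669189453125, 0.0, 0.1802978515625]
colbert = [0.7796499729156494, 0.4621465802192688, 0.4523794651031494, 0.7898575067520142]

w = [0.4, 0.2, 0.4]  # weights for dense, sparse, colbert, as passed to compute_score

# 'colbert+sparse+dense' is the plain weighted sum from the diff's comment:
# w[0]*dense_score + w[1]*sparse_score + w[2]*colbert_score
all_three = [w[0]*d + w[1]*s + w[2]*c for d, s, c in zip(dense, sparse, colbert)]

# 'sparse+dense' matches the same sum over the first two modes, renormalized
# by the weights actually used (an inference from the printed values).
two_way = [(w[0]*d + w[1]*s) / (w[0] + w[1]) for d, s in zip(dense, sparse)]

print(all_three)  # ~[0.6014, 0.3256, 0.3209, 0.6233], matching 'colbert+sparse+dense'
print(two_way)    # ~[0.4825, 0.2345, 0.2332, 0.5122], matching 'sparse+dense'
```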